Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ go 1.24.2
require (
github.com/IBM-Cloud/bluemix-go v0.0.0-20251001005609-37dbcddbe871
github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20240725064144-454a2ae23113
github.com/IBM-Cloud/power-go-client v1.13.0
github.com/IBM-Cloud/power-go-client v1.14.0-beta4
github.com/IBM/appconfiguration-go-admin-sdk v0.5.1
github.com/IBM/appid-management-go-sdk v0.0.0-20210908164609-dd0e0eaf732f
github.com/IBM/cloud-databases-go-sdk v0.8.0
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,8 @@ github.com/IBM-Cloud/bluemix-go v0.0.0-20251001005609-37dbcddbe871 h1:0NrvGUWBoG
github.com/IBM-Cloud/bluemix-go v0.0.0-20251001005609-37dbcddbe871/go.mod h1:lU1/3aolIs4y062yTTokFEiIEssAZqqjdj/5qvkBeq8=
github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20240725064144-454a2ae23113 h1:f2Erqfea1dKpaTFagTJM6W/wnD3JGq/Vn9URh8nuRwk=
github.com/IBM-Cloud/container-services-go-sdk v0.0.0-20240725064144-454a2ae23113/go.mod h1:xUQL9SGAjoZFd4GNjrjjtEpjpkgU7RFXRyHesbKTjiY=
github.com/IBM-Cloud/power-go-client v1.13.0 h1:TqxPlkJe0VkNdV9hYOD5NRepxEFhhyKXWXfg22x2zhU=
github.com/IBM-Cloud/power-go-client v1.13.0/go.mod h1:SpTK1ttW8bfMNUVQS8qOEuWn2KOkzaCLyzfze8MG1JE=
github.com/IBM-Cloud/power-go-client v1.14.0-beta4 h1:rU3gBpw0fuIZ9dbYEIHnIqMXHqXPIM2PlC7DgCTOCeA=
github.com/IBM-Cloud/power-go-client v1.14.0-beta4/go.mod h1:SpTK1ttW8bfMNUVQS8qOEuWn2KOkzaCLyzfze8MG1JE=
github.com/IBM-Cloud/softlayer-go v1.0.5-tf h1:koUAyF9b6X78lLLruGYPSOmrfY2YcGYKOj/Ug9nbKNw=
github.com/IBM-Cloud/softlayer-go v1.0.5-tf/go.mod h1:6HepcfAXROz0Rf63krk5hPZyHT6qyx2MNvYyHof7ik4=
github.com/IBM/appconfiguration-go-admin-sdk v0.5.1 h1:EAotl3yQ/u5u/uBryySJMm0COgsYhtNzQ1g2IChtlE0=
Expand Down
2 changes: 2 additions & 0 deletions ibm/service/power/ibm_pi_constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,7 @@ const (
Arg_StorageType = "pi_storage_type"
Arg_SysType = "pi_sys_type"
Arg_Target = "pi_target"
Arg_TargetCRN = "pi_target_crn"
Arg_TargetStorageTier = "pi_target_storage_tier"
Arg_Type = "pi_type"
Arg_UserData = "pi_user_data"
Expand Down Expand Up @@ -451,6 +452,7 @@ const (
Attr_ReplicationPoolMap = "replication_pool_map"
Attr_ReplicationSites = "replication_sites"
Attr_ReplicationStatus = "replication_status"
Attr_ReplicationTargetCRN = "replication_target_crn"
Attr_ReplicationType = "replication_type"
Attr_ReservedCore = "reserved_core"
Attr_ReservedCores = "reserved_cores"
Expand Down
110 changes: 80 additions & 30 deletions ibm/service/power/resource_ibm_pi_volume_group.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,11 @@ func ResourceIBMPIVolumeGroup() *schema.Resource {
Optional: true,
Type: schema.TypeString,
},
Arg_TargetCRN: {
Description: "Target CRN of the secondary workspace where the auxiliary data resides; optional; if specified, the auxiliary volumes for the primary volumes getting added to the new volume group will be automatically onboarded into the secondary workspace and added to the corresponding auxiliary consistency group.",
Optional: true,
Type: schema.TypeString,
},
Arg_VolumeGroupName: {
ConflictsWith: []string{Arg_ConsistencyGroupName},
Description: "Volume Group Name to create",
Expand Down Expand Up @@ -80,6 +85,11 @@ func ResourceIBMPIVolumeGroup() *schema.Resource {
Description: "Volume Group Replication Status",
Type: schema.TypeString,
},
Attr_ReplicationTargetCRN: {
Computed: true,
Description: "CRN of the replication target workspace; for a primary replicated volume this is the target workspace that owns the auxiliary data; for an auxiliary replicated volume this is the target workspace that owns the primary data.",
Type: schema.TypeString,
},
Attr_StatusDescriptionErrors: {
Computed: true,
Description: "The status details of the volume group.",
Expand Down Expand Up @@ -119,18 +129,22 @@ func ResourceIBMPIVolumeGroup() *schema.Resource {
}
}

func resourceIBMPIVolumeGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
func resourceIBMPIVolumeGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
sess, err := meta.(conns.ClientSession).IBMPISession()
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("IBMPISession failed: %s", err.Error()), "ibm_pi_volume_group", "create")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

vgName := d.Get(Arg_VolumeGroupName).(string)
cloudInstanceID := d.Get(Arg_CloudInstanceID).(string)
body := &models.VolumeGroupCreate{
Name: vgName,
}

if v, ok := d.GetOk(Arg_TargetCRN); ok {
body.TargetCRN = v.(string)
}
volids := flex.ExpandStringList((d.Get(Arg_VolumeIDs).(*schema.Set)).List())
body.VolumeIDs = volids

Expand All @@ -141,35 +155,45 @@ func resourceIBMPIVolumeGroupCreate(ctx context.Context, d *schema.ResourceData,
client := instance.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID)
vg, err := client.CreateVolumeGroup(body)
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("CreateVolumeGroup failed: %s", err.Error()), "ibm_pi_volume_group", "create")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

d.SetId(fmt.Sprintf("%s/%s", cloudInstanceID, *vg.ID))

_, err = isWaitForIBMPIVolumeGroupAvailable(ctx, client, *vg.ID, d.Timeout(schema.TimeoutCreate))
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("isWaitForIBMPIVolumeGroupAvailable failed: %s", err.Error()), "ibm_pi_volume_group", "create")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

return resourceIBMPIVolumeGroupRead(ctx, d, meta)
}

func resourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
func resourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
sess, err := meta.(conns.ClientSession).IBMPISession()
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("IBMPISession failed: %s", err.Error()), "ibm_pi_volume_group", "read")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

cloudInstanceID, vgID, err := splitID(d.Id())
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("splitID failed: %s", err.Error()), "ibm_pi_volume_group", "read")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

client := instance.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID)

vg, err := client.GetDetails(vgID)
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("GetDetails failed: %s", err.Error()), "ibm_pi_volume_group", "read")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

d.Set(Arg_VolumeGroupName, vg.Name)
Expand All @@ -182,52 +206,71 @@ func resourceIBMPIVolumeGroupRead(ctx context.Context, d *schema.ResourceData, m
}
d.Set(Attr_VolumeGroupID, vg.ID)
d.Set(Attr_VolumeGroupStatus, vg.Status)
d.Set(Attr_ReplicationTargetCRN, vg.ReplicationTargetCRN)

return nil
}

func resourceIBMPIVolumeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
func resourceIBMPIVolumeGroupUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {

sess, err := meta.(conns.ClientSession).IBMPISession()
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("IBMPISession failed: %s", err.Error()), "ibm_pi_volume_group", "update")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

cloudInstanceID, vgID, err := splitID(d.Id())
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("splitID failed: %s", err.Error()), "ibm_pi_volume_group", "update")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

client := instance.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID)
body := &models.VolumeGroupUpdate{}
hasChange := false
if d.HasChanges(Arg_VolumeIDs) {
old, new := d.GetChange(Arg_VolumeIDs)
oldList := old.(*schema.Set)
newList := new.(*schema.Set)
body := &models.VolumeGroupUpdate{
AddVolumes: flex.ExpandStringList(newList.Difference(oldList).List()),
RemoveVolumes: flex.ExpandStringList(oldList.Difference(newList).List()),
}
err := client.UpdateVolumeGroup(vgID, body)
body.AddVolumes = flex.ExpandStringList(newList.Difference(oldList).List())
body.RemoveVolumes = flex.ExpandStringList(oldList.Difference(newList).List())
hasChange = true
}
if d.HasChanges(Arg_TargetCRN) {
body.TargetCRN = d.Get(Arg_TargetCRN).(string)
hasChange = true
}
if hasChange {
err = client.UpdateVolumeGroup(vgID, body)
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("UpdateVolumeGroup failed: %s", err.Error()), "ibm_pi_volume_group", "update")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
_, err = isWaitForIBMPIVolumeGroupAvailable(ctx, client, vgID, d.Timeout(schema.TimeoutUpdate))
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("isWaitForIBMPIVolumeGroupAvailable failed: %s", err.Error()), "ibm_pi_volume_group", "update")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
}

return resourceIBMPIVolumeGroupRead(ctx, d, meta)
}
func resourceIBMPIVolumeGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
func resourceIBMPIVolumeGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
sess, err := meta.(conns.ClientSession).IBMPISession()
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("IBMPISession failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

cloudInstanceID, vgID, err := splitID(d.Id())
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("splitID failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

client := instance.NewIBMPIVolumeGroupClient(ctx, sess, cloudInstanceID)
Expand All @@ -239,27 +282,34 @@ func resourceIBMPIVolumeGroupDelete(ctx context.Context, d *schema.ResourceData,
}
err = client.UpdateVolumeGroup(vgID, body)
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("UpdateVolumeGroup failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
_, err = isWaitForIBMPIVolumeGroupAvailable(ctx, client, vgID, d.Timeout(schema.TimeoutUpdate))
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("isWaitForIBMPIVolumeGroupAvailable failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
}

err = client.DeleteVolumeGroup(vgID)
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("DeleteVolumeGroup failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
_, err = isWaitForIBMPIVolumeGroupDeleted(ctx, client, vgID, d.Timeout(schema.TimeoutDelete))
if err != nil {
return diag.FromErr(err)
tfErr := flex.TerraformErrorf(err, fmt.Sprintf("isWaitForIBMPIVolumeGroupDeleted failed: %s", err.Error()), "ibm_pi_volume_group", "delete")
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}

d.SetId("")
return nil
}
func isWaitForIBMPIVolumeGroupAvailable(ctx context.Context, client *instance.IBMPIVolumeGroupClient, id string, timeout time.Duration) (interface{}, error) {
func isWaitForIBMPIVolumeGroupAvailable(ctx context.Context, client *instance.IBMPIVolumeGroupClient, id string, timeout time.Duration) (any, error) {
log.Printf("Waiting for Volume Group (%s) to be available.", id)

stateConf := &retry.StateChangeConf{
Expand Down Expand Up @@ -289,7 +339,7 @@ func isIBMPIVolumeGroupRefreshFunc(client *instance.IBMPIVolumeGroupClient, id s
}
}

func isWaitForIBMPIVolumeGroupDeleted(ctx context.Context, client *instance.IBMPIVolumeGroupClient, id string, timeout time.Duration) (interface{}, error) {
func isWaitForIBMPIVolumeGroupDeleted(ctx context.Context, client *instance.IBMPIVolumeGroupClient, id string, timeout time.Duration) (any, error) {
stateConf := &retry.StateChangeConf{
Pending: []string{State_Deleting, State_Updating},
Target: []string{State_Deleted},
Expand Down
2 changes: 2 additions & 0 deletions website/docs/r/pi_volume_group.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ Review the argument references that you can specify for your resource.

- `pi_cloud_instance_id` - (Required, String) The GUID of the service instance associated with an account.
- `pi_consistency_group_name` - (Optional, String) The name of the consistency group at the storage controller level, required if `pi_volume_group_name` is not provided.
- `pi_target_crn` - (Optional, String) Target CRN of the secondary workspace where the auxiliary data resides; if specified, the auxiliary volumes for the primary volumes getting added to the new volume group will be automatically onboarded into the secondary workspace and added to the corresponding auxiliary consistency group.
- `pi_volume_group_name` - (Optional, String) The name of the volume group, required if `pi_consistency_group_name` is not provided.
- `pi_volume_ids` - (Required, Set of String) List of volume IDs to add to the volume group.

Expand All @@ -64,6 +65,7 @@ In addition to all argument reference list, you can access the following attribu
- `id` - (String) The unique identifier of the volume group. The ID is composed of `<pi_cloud_instance_id>/<volume_group_id>`.
- `replication_sites` - (List) Indicates the replication sites of the volume group.
- `replication_status` - (String) The replication status of the volume group.
- `replication_target_crn` - (String) CRN of the replication target workspace; for a primary replicated volume this is the target workspace that owns the auxiliary data; for an auxiliary replicated volume this is the target workspace that owns the primary data.
- `status_description_errors` - (Set) The status details of the volume group.

Nested scheme for `status_description_errors`:
Expand Down
Loading