diff --git a/client/vm.go b/client/vm.go
index a36b50c3..3504e5a2 100644
--- a/client/vm.go
+++ b/client/vm.go
@@ -272,7 +272,6 @@ func (c *Client) CreateVm(vmReq Vm, createTime time.Duration) (*Vm, error) {
         "cpuCap":    nil,
         "cpuWeight": nil,
         "CPUs":      vmReq.CPUs.Number,
-        "memoryMax": vmReq.Memory.Static[1],
         "existingDisks": existingDisks,
         // TODO: (#145) Uncomment this once issues with secure_boot have been figured out
         // "secureBoot": vmReq.SecureBoot,
@@ -283,6 +282,15 @@ func (c *Client) CreateVm(vmReq Vm, createTime time.Duration) (*Vm, error) {
         "auto_poweron":      vmReq.AutoPoweron,
         "high_availability": vmReq.HA,
     }
+    if len(vmReq.Memory.Dynamic) == 2 && vmReq.Memory.Dynamic[0] != 0 {
+        params["memoryMin"] = vmReq.Memory.Dynamic[0]
+    }
+    if len(vmReq.Memory.Dynamic) == 2 && vmReq.Memory.Dynamic[1] != 0 {
+        params["memoryMax"] = vmReq.Memory.Dynamic[1]
+    }
+    if len(vmReq.Memory.Static) == 2 && vmReq.Memory.Static[1] != 0 {
+        params["memoryStaticMax"] = vmReq.Memory.Static[1]
+    }
 
     if !params["clone"].(bool) && vmReq.CloneType == CloneTypeFastClone {
         fmt.Printf("[WARN] A fast clone was requested but falling back to full due to lack of disk template support\n")
@@ -362,6 +370,15 @@ func (c *Client) CreateVm(vmReq Vm, createTime time.Duration) (*Vm, error) {
         "id":           vmId,
         "xenStoreData": vmReq.XenstoreData,
     }
+    if params["memoryStaticMax"] != nil {
+        xsParams["memoryStaticMax"] = params["memoryStaticMax"]
+    }
+    if params["memoryMin"] != nil {
+        xsParams["memoryMin"] = params["memoryMin"]
+    }
+    if params["memoryMax"] != nil {
+        xsParams["memoryMax"] = params["memoryMax"]
+    }
     var success bool
     err = c.Call("vm.set", xsParams, &success)
 
@@ -409,7 +426,6 @@ func (c *Client) UpdateVm(vmReq Vm) (*Vm, error) {
         "auto_poweron":      vmReq.AutoPoweron,
         "high_availability": vmReq.HA, // valid options are best-effort, restart, ''
         "CPUs":              vmReq.CPUs.Number,
-        "memoryMax":         vmReq.Memory.Static[1],
         "expNestedHvm":      vmReq.ExpNestedHvm,
         "startDelay":        vmReq.StartDelay,
         // TODO: These need more investigation before they are implemented
@@ -424,6 +440,15 @@ func (c *Client) UpdateVm(vmReq Vm) (*Vm, error) {
         // cpusMask, cpuWeight and cpuCap can be changed at runtime to an integer value or null
         // coresPerSocket is null or a number of cores per socket. Putting an invalid value doesn't seem to cause an error :(
     }
+    if len(vmReq.Memory.Dynamic) == 2 && vmReq.Memory.Dynamic[0] != 0 {
+        params["memoryMin"] = vmReq.Memory.Dynamic[0]
+    }
+    if len(vmReq.Memory.Dynamic) == 2 && vmReq.Memory.Dynamic[1] != 0 {
+        params["memoryMax"] = vmReq.Memory.Dynamic[1]
+    }
+    if len(vmReq.Memory.Static) == 2 && vmReq.Memory.Static[1] != 0 {
+        params["memoryStaticMax"] = vmReq.Memory.Static[1]
+    }
 
     affinityHost := vmReq.AffinityHost
     if affinityHost != nil {
diff --git a/docs/resources/vm.md b/docs/resources/vm.md
index e979d350..295a8b94 100644
--- a/docs/resources/vm.md
+++ b/docs/resources/vm.md
@@ -134,7 +134,7 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
 # Updating the VM to use 5 CPUs would stop/start the VM
 ```
 - `disk` (Block List, Min: 1) The disk the VM will have access to. (see [below for nested schema](#nestedblock--disk))
-- `memory_max` (Number) The amount of memory in bytes the VM will have. Updates to this field will case a stop and start of the VM if the new value is greater than the dynamic memory max. This can be determined with the following command:
+- `memory_max` (Number) The amount of static memory in bytes the VM will have. Updates to this field will cause a stop and start of the VM if the new value is greater than the dynamic memory max. This can be determined with the following command:
 
 ```
 
@@ -178,6 +178,8 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
 - `videoram` (Number) The videoram option the VM should use. Possible values include 1, 2, 4, 8, 16
 - `wait_for_ip` (Boolean) Whether terraform should wait until IP addresses are present on the VM's network interfaces before considering it created. This only works if guest-tools are installed in the VM. Defaults to false.
 - `xenstore` (Map of String) The key value pairs to be populated in xenstore.
+- `memory_dynamic_min` (Number) Dynamic minimum (bytes)
+- `memory_dynamic_max` (Number) Dynamic maximum (bytes)
 
 ### Read-Only
 
diff --git a/xoa/data_source_vms.go b/xoa/data_source_vms.go
index f5c68049..65125455 100644
--- a/xoa/data_source_vms.go
+++ b/xoa/data_source_vms.go
@@ -81,6 +81,8 @@ func vmToMapList(vms []client.Vm) []map[string]interface{} {
             "cloud_config":         vm.CloudConfig,
             "cloud_network_config": vm.CloudNetworkConfig,
             "tags":                 vm.Tags,
+            "memory_dynamic_min":   vm.Memory.Dynamic[0],
+            "memory_dynamic_max":   vm.Memory.Dynamic[1],
             "memory_max":           vm.Memory.Static[1],
             "affinity_host":        vm.AffinityHost,
             "template":             vm.Template,
diff --git a/xoa/internal/state/migrate.go b/xoa/internal/state/migrate.go
index 9a14d33d..bc51aaac 100644
--- a/xoa/internal/state/migrate.go
+++ b/xoa/internal/state/migrate.go
@@ -38,6 +38,11 @@ func VmStateUpgradeV0(ctx context.Context, rawState map[string]interface{}, meta
     rawState["destroy_cloud_config_vdi_after_boot"] = false
     return rawState, nil
 }
+func VmStateUpgradeV1(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
+    rawState["memory_dynamic_max"] = rawState["memory_max"]
+    delete(rawState, "memory_max")
+    return rawState, nil
+}
 
 func ResourceVmResourceV0() *schema.Resource {
     return &schema.Resource{
@@ -352,6 +357,39 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
     }
 }
 
+func ResourceVmResourceV1() *schema.Resource {
+    return &schema.Resource{
+        Schema: map[string]*schema.Schema{
+            "memory_max": &schema.Schema{
+                Type:     schema.TypeInt,
+                Required: true,
+                Description: `The amount of memory in bytes the VM will have. Updates to this field will cause a stop and start of the VM if the new value is greater than the dynamic memory max. This can be determined with the following command:
+
+$ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd0b8"}' | jq '.[].memory.dynamic'
+[
+  2147483648, # memory dynamic min
+  4294967296 # memory dynamic max (4GB)
+]
+# Updating the VM to use 3GB of memory would happen without stopping/starting the VM
+# Updating the VM to use 5GB of memory would stop/start the VM
+ `,
+            },
+            "memory_dynamic_min": &schema.Schema{
+                Type:        schema.TypeInt,
+                Optional:    true,
+                Description: `Dynamic minimum (bytes)`,
+                Computed:    true,
+            },
+            "memory_dynamic_max": &schema.Schema{
+                Type:        schema.TypeInt,
+                Optional:    true,
+                Description: `Dynamic maximum (bytes)`,
+                Computed:    true,
+            },
+        },
+    }
+}
+
 func suppressAttachedDiffWhenHalted(k, old, new string, d *schema.ResourceData) (suppress bool) {
     powerState := d.Get("power_state").(string)
     suppress = true
diff --git a/xoa/resource_xenorchestra_vm.go b/xoa/resource_xenorchestra_vm.go
index e5f6be73..47c16641 100644
--- a/xoa/resource_xenorchestra_vm.go
+++ b/xoa/resource_xenorchestra_vm.go
@@ -217,7 +217,7 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
         "memory_max": &schema.Schema{
             Type:     schema.TypeInt,
             Required: true,
-            Description: `The amount of memory in bytes the VM will have. Updates to this field will case a stop and start of the VM if the new value is greater than the dynamic memory max. This can be determined with the following command:
+            Description: `The amount of static memory in bytes the VM will have. Updates to this field will cause a stop and start of the VM if the new value is greater than the dynamic memory max. This can be determined with the following command:
 
 ` + "```" + `
 
@@ -232,6 +232,18 @@ $ xo-cli xo.getAllObjects filter='json:{"id": "cf7b5d7d-3cd5-6b7c-5025-5c935c8cd
 
 `,
         },
+        "memory_dynamic_min": &schema.Schema{
+            Type:        schema.TypeInt,
+            Optional:    true,
+            Description: `Dynamic minimum (bytes)`,
+            Computed:    true,
+        },
+        "memory_dynamic_max": &schema.Schema{
+            Type:        schema.TypeInt,
+            Optional:    true,
+            Description: `Dynamic maximum (bytes)`,
+            Computed:    true,
+        },
         "resource_set": &schema.Schema{
             Type:     schema.TypeString,
             Optional: true,
@@ -442,13 +454,18 @@ This does not work in terraform since that is applied on Xen Orchestra's client
         Read:   resourceVmRead,
         Update: resourceVmUpdate,
         Delete: resourceVmDelete,
-        SchemaVersion: 1,
+        SchemaVersion: 2,
         StateUpgraders: []schema.StateUpgrader{
             {
                 Type:    state.ResourceVmResourceV0().CoreConfigSchema().ImpliedType(),
                 Upgrade: state.VmStateUpgradeV0,
                 Version: 0,
             },
+            {
+                Type:    state.ResourceVmResourceV1().CoreConfigSchema().ImpliedType(),
+                Upgrade: state.VmStateUpgradeV1,
+                Version: 1,
+            },
         },
         Importer: &schema.ResourceImporter{
             State: RecordImport,
@@ -539,6 +556,11 @@ func resourceVmCreate(d *schema.ResourceData, m interface{}) error {
             Id: rsId.(string),
         }
     }
+
+    memoryObject, err := memory(d)
+    if err != nil {
+        return err
+    }
     createVmParams := client.Vm{
         BlockedOperations: blockedOperations,
         Boot: client.Boot{
@@ -559,14 +581,10 @@ func resourceVmCreate(d *schema.ResourceData, m interface{}) error {
             Number: d.Get("cpus").(int),
         },
         CloudNetworkConfig: d.Get("cloud_network_config").(string),
-        Memory: client.MemoryObject{
-            Static: []int{
-                0, d.Get("memory_max").(int),
-            },
-        },
-        Tags:         vmTags,
-        Disks:        ds,
-        Installation: installation,
+        Memory:             *memoryObject,
+        Tags:               vmTags,
+        Disks:              ds,
+        Installation:       installation,
         // TODO: (#145) Uncomment this once issues with secure_boot have been figured out
         // SecureBoot: d.Get("secure_boot").(bool),
         VIFsMap: network_maps,
@@ -756,7 +774,6 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
             Id: d.Get("resource_set").(string),
         }
     }
-    memoryMax := d.Get("memory_max").(int)
 
     vm, err := c.GetVm(client.Vm{Id: id})
 
@@ -882,7 +899,7 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
         haltForUpdates = true
     }
 
-    if _, nMemoryMax := d.GetChange("memory_max"); d.HasChange("memory_max") && nMemoryMax.(int) > vm.Memory.Static[1] {
+    if d.HasChange("memory_max") {
         haltForUpdates = true
     }
 
@@ -904,16 +921,16 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
 
     }
 
+    memoryObject, err := memory(d)
+    if err != nil {
+        return err
+    }
     vmReq := client.Vm{
         Id: id,
         CPUs: client.CPUs{
             Number: cpus,
         },
-        Memory: client.MemoryObject{
-            Static: []int{
-                0, memoryMax,
-            },
-        },
+        Memory:          *memoryObject,
         NameLabel:       nameLabel,
         NameDescription: nameDescription,
         HA:              ha,
@@ -933,6 +950,10 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
         },
     }
 
+    if len(vmReq.Memory.Dynamic) == 2 && vmReq.Memory.Dynamic[1] > vm.Memory.Static[1] {
+        haltForUpdates = true
+    }
+
     if d.HasChange("affinity_host") {
         vmReq.AffinityHost = &affinityHost
     }
@@ -1030,6 +1051,37 @@ func resourceVmUpdate(d *schema.ResourceData, m interface{}) error {
     return resourceVmRead(d, m)
 }
 
+func memory(d *schema.ResourceData) (*client.MemoryObject, error) {
+    memory := &client.MemoryObject{
+        Dynamic: []int{},
+        Static: []int{
+            0, d.Get("memory_max").(int),
+        },
+    }
+    if !d.GetRawConfig().GetAttr("memory_dynamic_min").IsNull() {
+        if memoryDynamicMin, ok := d.GetOk("memory_dynamic_min"); ok {
+            if len(memory.Dynamic) == 0 {
+                memory.Dynamic = []int{memoryDynamicMin.(int), 0}
+            } else {
+                memory.Dynamic[0] = memoryDynamicMin.(int)
+            }
+        }
+    }
+    if !d.GetRawConfig().GetAttr("memory_dynamic_max").IsNull() {
+        if memoryDynamicMax, ok := d.GetOk("memory_dynamic_max"); ok {
+            if len(memory.Dynamic) == 0 {
+                memory.Dynamic = []int{0, memoryDynamicMax.(int)}
+            } else {
+                memory.Dynamic[1] = memoryDynamicMax.(int)
+            }
+        }
+        if memory.Dynamic[1] > memory.Static[1] {
+            return nil, errors.New("memory_dynamic_max cannot be more than memory_max")
+        }
+    }
+    return memory, nil
+}
+
 func resourceVmDelete(d *schema.ResourceData, m interface{}) error {
     c := m.(client.XOClient)
 
@@ -1119,13 +1171,9 @@ func RecordImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData
 
 func recordToData(resource client.Vm, vifs []client.VIF, disks []client.Disk, cdroms []client.Disk, d *schema.ResourceData) error {
     d.SetId(resource.Id)
     // d.Set("cloud_config", resource.CloudConfig)
-    if len(resource.Memory.Dynamic) == 2 {
-        if err := d.Set("memory_max", resource.Memory.Dynamic[1]); err != nil {
-            return err
-        }
-    } else {
-        log.Printf("[WARN] Expected the VM's static memory limits to have two values, %v found instead\n", resource.Memory.Dynamic)
-    }
+    d.Set("memory_max", resource.Memory.Static[1])
+    d.Set("memory_dynamic_min", resource.Memory.Dynamic[0])
+    d.Set("memory_dynamic_max", resource.Memory.Dynamic[1])
     d.Set("cpus", resource.CPUs.Number)
     d.Set("name_label", resource.NameLabel)
:= "xenorchestra_vm.bar" + vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name()) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccFailToStartAndHaltProviders, + CheckDestroy: testAccCheckXenorchestraVmDestroy, + Steps: []resource.TestStep{ + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamicMax(4, 4295000000, 3221225472, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "3221225472"), + ), + }, + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamic(4, 4295000000, 2147500000, 3221225472, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_min", "2147500000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "3221225472"), + ), + }, + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamicMin(4, 4295000000, 3221225472, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_min", "3221225472"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "3221225472"), + ), + }, + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamicMax(4, 3221225472, 4295000000, vmName, "", "", false), + ExpectError: regexp.MustCompile("memory_dynamic_max cannot be more than memory_max"), + }, + }, + }) +} + func TestAccXenorchestraVm_updatesThatRequireReboot(t *testing.T) { resourceName := "xenorchestra_vm.bar" vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name()) @@ -1597,6 +1645,35 @@ func TestAccXenorchestraVm_updatesThatRequireReboot(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckXenorchestraVmDestroy, Steps: []resource.TestStep{ + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(4, 6295000000, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "6295000000"), + ), + }, + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(4, 4295000000, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + ), + }, + { + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamic(4, 6295000000, 1073741824, 6295000000, vmName, "", "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + 
resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "6295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "6295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_min", "1073741824"), + ), + }, { Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(2, 4295000000, vmName, "", "", false), Check: resource.ComposeAggregateTestCheckFunc( @@ -1604,14 +1681,15 @@ func TestAccXenorchestraVm_updatesThatRequireReboot(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "id"), resource.TestCheckResourceAttr(resourceName, "cpus", "2"), resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "4295000000"), ), }, { - Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(5, 6295000000, vmName, "", "", false), + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(4, 6295000000, vmName, "", "", false), Check: resource.ComposeAggregateTestCheckFunc( testAccVmExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "cpus", "5"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), resource.TestCheckResourceAttr(resourceName, "memory_max", "6295000000"), ), }, @@ -1619,7 +1697,7 @@ func TestAccXenorchestraVm_updatesThatRequireReboot(t *testing.T) { }) } -func TestAccXenorchestraVm_updatingCpusInsideMaxCpuAndMemInsideStaticMaxDoesNotRequireReboot(t *testing.T) { +func TestAccXenorchestraVm_updatingCpusInsideMaxCpuAndMemInsideDynamicMaxDoesNotRequireReboot(t *testing.T) { resourceName := "xenorchestra_vm.bar" vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name()) resource.ParallelTest(t, resource.TestCase{ @@ -1631,21 +1709,21 @@ func TestAccXenorchestraVm_updatingCpusInsideMaxCpuAndMemInsideStaticMaxDoesNotR CheckDestroy: testAccCheckXenorchestraVmDestroy, Steps: []resource.TestStep{ { - Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(5, 4295000000, vmName, "", "", false), + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamic(4, 4295000000, 1073741824, 4295000000, vmName, "", "", false), Check: resource.ComposeAggregateTestCheckFunc( testAccVmExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "cpus", "5"), - resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "cpus", "4"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "4295000000"), ), }, { - Config: testAccVmConfigUpdateAttrsVariableCPUAndMemory(2, 3221225472, vmName, "", "", false), + Config: testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamic(2, 4295000000, 1073741824, 3221225472, vmName, "", "", false), Check: resource.ComposeAggregateTestCheckFunc( testAccVmExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "id"), resource.TestCheckResourceAttr(resourceName, "cpus", "2"), - resource.TestCheckResourceAttr(resourceName, "memory_max", "3221225472"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "3221225472"), ), }, }, @@ -1791,6 +1869,45 @@ func TestAccXenorchestraVm_createWithV0StateMigration(t *testing.T) { }) } +func TestAccXenorchestraVm_createWithV1StateMigration(t *testing.T) { + resourceName := "xenorchestra_vm.bar" + vmName := fmt.Sprintf("%s - %s", accTestPrefix, t.Name()) + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccCheckXenorchestraVmDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "xenorchestra": { + Source: "vatesfr/xenorchestra", + VersionConstraint: "0.28.0", + }, + }, + Config: testAccVmConfig(vmName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckNoResourceAttr(resourceName, "memory_dynamic_max"), + ), + }, + { + ProviderFactories: map[string]func() (*schema.Provider, error){ + "xenorchestra": func() (*schema.Provider, error) { + return Provider(), nil + }, + }, + Config: testAccVmConfig(vmName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccVmExists(resourceName), + resource.TestCheckResourceAttrSet(resourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "memory_dynamic_max", "4295000000"), + resource.TestCheckResourceAttr(resourceName, "memory_max", "4295000000"), + ), + }, + }, + }) +} + func testAccVmExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -2851,6 +2968,109 @@ resource "xenorchestra_vm" "bar" { } `, accTestPool.NameLabel, accDefaultNetwork.NameLabel, memory, cpus, nameLabel, nameDescription, ha, powerOn, accDefaultSr.Id) } +func testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamic(cpus, memory int, memoryMin int, memoryMax int, nameLabel, nameDescription, ha string, powerOn bool) string { + return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", nameLabel), "template") + testAccTemplateConfig() + fmt.Sprintf(` +data "xenorchestra_pool" "pool" { + name_label = "%s" +} + +data "xenorchestra_network" "network" { + name_label = "%s" + pool_id = data.xenorchestra_pool.pool.id +} + +resource "xenorchestra_vm" "bar" { + memory_max = %d + memory_dynamic_min = %d + memory_dynamic_max = %d + cpus = %d + cloud_config = xenorchestra_cloud_config.bar.template + name_label = "%s" + name_description = "%s" + affinity_host = data.xenorchestra_pool.pool.master + template = data.xenorchestra_template.template.id + high_availability = "%s" + auto_poweron = "%t" + network { + network_id = data.xenorchestra_network.network.id + } + + disk { + sr_id = "%s" + name_label = "disk 1" + size = 10001317888 + } +} +`, accTestPool.NameLabel, accDefaultNetwork.NameLabel, memory, memoryMin, memoryMax, cpus, nameLabel, nameDescription, ha, powerOn, accDefaultSr.Id) +} +func testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamicMin(cpus, memory int, memoryMin int, nameLabel, nameDescription, ha string, powerOn bool) string { + return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", nameLabel), "template") + testAccTemplateConfig() + fmt.Sprintf(` +data "xenorchestra_pool" "pool" { + name_label = "%s" +} + +data "xenorchestra_network" "network" { + name_label = "%s" + pool_id = data.xenorchestra_pool.pool.id +} + +resource "xenorchestra_vm" "bar" { + memory_max = %d + memory_dynamic_min = %d + cpus = %d + cloud_config = xenorchestra_cloud_config.bar.template + name_label = "%s" + name_description = "%s" + affinity_host = data.xenorchestra_pool.pool.master + template = data.xenorchestra_template.template.id + high_availability = "%s" + auto_poweron = "%t" + network { + network_id = data.xenorchestra_network.network.id + } + + disk { + sr_id = "%s" + name_label = "disk 1" + size = 10001317888 + } +} +`, 
accTestPool.NameLabel, accDefaultNetwork.NameLabel, memory, memoryMin, cpus, nameLabel, nameDescription, ha, powerOn, accDefaultSr.Id) +} +func testAccVmConfigUpdateAttrsVariableCPUAndMemoryDynamicMax(cpus, memory int, memoryDynamicMax int, nameLabel, nameDescription, ha string, powerOn bool) string { + return testAccCloudConfigConfig(fmt.Sprintf("vm-template-%s", nameLabel), "template") + testAccTemplateConfig() + fmt.Sprintf(` +data "xenorchestra_pool" "pool" { + name_label = "%s" +} + +data "xenorchestra_network" "network" { + name_label = "%s" + pool_id = data.xenorchestra_pool.pool.id +} + +resource "xenorchestra_vm" "bar" { + memory_max = %d + memory_dynamic_max = %d + cpus = %d + cloud_config = xenorchestra_cloud_config.bar.template + name_label = "%s" + name_description = "%s" + affinity_host = data.xenorchestra_pool.pool.master + template = data.xenorchestra_template.template.id + high_availability = "%s" + auto_poweron = "%t" + network { + network_id = data.xenorchestra_network.network.id + } + + disk { + sr_id = "%s" + name_label = "disk 1" + size = 10001317888 + } +} +`, accTestPool.NameLabel, accDefaultNetwork.NameLabel, memory, memoryDynamicMax, cpus, nameLabel, nameDescription, ha, powerOn, accDefaultSr.Id) +} func providerCredentials(username, password string) string { return fmt.Sprintf(`
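
Reviewer note (not part of the patch): as a usage illustration of the attributes introduced above, a minimal configuration sketch modeled on the acceptance-test configs in this diff. The resource label and byte values are illustrative, and the other required arguments (name_label, template, cpus, network, disk, and so on) are elided here.

```hcl
resource "xenorchestra_vm" "example" {
  # ... name_label, template, cpus, network, disk, etc. ...

  # Static maximum (bytes). With this change, any update to memory_max
  # still stops and starts the VM (resourceVmUpdate sets haltForUpdates
  # whenever this field changes).
  memory_max         = 4295000000

  # Optional dynamic range (bytes). Both attributes are Optional and
  # Computed, so omitting them keeps whatever Xen Orchestra reports.
  memory_dynamic_min = 2147500000
  memory_dynamic_max = 3221225472
}
```

Raising `memory_dynamic_max` above the VM's current static maximum also sets `haltForUpdates`, so that change is applied with a stop/start, as exercised by `TestAccXenorchestraVm_updatesThatRequireReboot`.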
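Likewise, the validation added in `memory()` can be illustrated with the failing case from the last step of `TestAccXenorchestraVm_updatesDynamicMemory` (again a sketch; values taken from that test step, resource label illustrative):

```hcl
resource "xenorchestra_vm" "example" {
  # ... other arguments elided ...
  memory_max         = 3221225472
  memory_dynamic_max = 4295000000 # greater than memory_max
}
```

Creating or updating a VM with this combination is expected to fail with `memory_dynamic_max cannot be more than memory_max`, since `memory()` rejects it while building the request payload.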