diff --git a/data/data/libvirt/bootstrap/main.tf b/data/data/libvirt/bootstrap/main.tf index 549e6aa464d..c70ecc06451 100644 --- a/data/data/libvirt/bootstrap/main.tf +++ b/data/data/libvirt/bootstrap/main.tf @@ -1,11 +1,13 @@ resource "libvirt_volume" "bootstrap" { name = "${var.cluster_id}-bootstrap" base_volume_id = var.base_volume_id + pool = var.pool } resource "libvirt_ignition" "bootstrap" { name = "${var.cluster_id}-bootstrap.ign" content = var.ignition + pool = var.pool } resource "libvirt_domain" "bootstrap" { diff --git a/data/data/libvirt/bootstrap/variables.tf b/data/data/libvirt/bootstrap/variables.tf index 85e456d222d..326245128a5 100644 --- a/data/data/libvirt/bootstrap/variables.tf +++ b/data/data/libvirt/bootstrap/variables.tf @@ -24,3 +24,7 @@ variable "network_id" { description = "The ID of a network resource containing the bootstrap node's addresses." } +variable "pool" { + type = string + description = "The name of the storage pool." +} diff --git a/data/data/libvirt/main.tf b/data/data/libvirt/main.tf index 68091997415..120f776e07c 100644 --- a/data/data/libvirt/main.tf +++ b/data/data/libvirt/main.tf @@ -2,11 +2,18 @@ provider "libvirt" { uri = var.libvirt_uri } +resource "libvirt_pool" "storage_pool" { + name = var.cluster_id + type = "dir" + path = "/var/lib/libvirt/openshift-images/${var.cluster_id}" +} + module "volume" { source = "./volume" cluster_id = var.cluster_id image = var.os_image + pool = libvirt_pool.storage_pool.name } module "bootstrap" { @@ -17,17 +24,20 @@ module "bootstrap" { cluster_id = var.cluster_id ignition = var.ignition_bootstrap network_id = libvirt_network.net.id + pool = libvirt_pool.storage_pool.name } resource "libvirt_volume" "master" { count = var.master_count name = "${var.cluster_id}-master-${count.index}" base_volume_id = module.volume.coreos_base_volume_id + pool = libvirt_pool.storage_pool.name } resource "libvirt_ignition" "master" { name = "${var.cluster_id}-master.ign" content = var.ignition_master + 
pool = libvirt_pool.storage_pool.name } resource "libvirt_network" "net" { diff --git a/data/data/libvirt/volume/main.tf b/data/data/libvirt/volume/main.tf index 651a75b9cd7..ee6257b0116 100644 --- a/data/data/libvirt/volume/main.tf +++ b/data/data/libvirt/volume/main.tf @@ -1,5 +1,5 @@ resource "libvirt_volume" "coreos_base" { name = "${var.cluster_id}-base" source = var.image + pool = var.pool } - diff --git a/data/data/libvirt/volume/variables.tf b/data/data/libvirt/volume/variables.tf index 19f1f9d9568..82b434d9b5b 100644 --- a/data/data/libvirt/volume/variables.tf +++ b/data/data/libvirt/volume/variables.tf @@ -8,3 +8,7 @@ variable "image" { type = string } +variable "pool" { + type = string + description = "The name of the storage pool." +} diff --git a/docs/dev/libvirt/README.md b/docs/dev/libvirt/README.md index 1e72f8feefc..62596caed87 100644 --- a/docs/dev/libvirt/README.md +++ b/docs/dev/libvirt/README.md @@ -155,25 +155,6 @@ NOTE: When the firewall rules are no longer needed, `sudo firewall-cmd --reload` will remove the changes made as they were not permanently added. For persistence, add `--permanent` to the `firewall-cmd` commands and run them a second time. -### Configure default libvirt storage pool - -Check to see if a default storage pool has been defined in Libvirt by running -`virsh --connect qemu:///system pool-list`. If it does not exist, create it: - -```sh -sudo virsh pool-define /dev/stdin < - default - - /var/lib/libvirt/images - - -EOF - -sudo virsh pool-start default -sudo virsh pool-autostart default -``` - ### Set up NetworkManager DNS overlay This step allows installer and users to resolve cluster-internal hostnames from your host. @@ -323,21 +304,6 @@ FATA[0019] failed to run Terraform: exit status 1 it is likely that your install configuration contains three backslashes after the protocol (e.g. `qemu+tcp:///...`), when it should only be two. 
-### SELinux might prevent access to image files -Configuring the storage pool to store images in a path incompatible with the SELinux policies (e.g. your home directory) might lead to the following errors: - -``` -Error: Error applying plan: - -1 error(s) occurred: - -* libvirt_domain.etcd: 1 error(s) occurred: - -* libvirt_domain.etcd: Error creating libvirt domain: virError(Code=1, Domain=10, Message='internal error: process exited while connecting to monitor: 2018-07-30T22:52:54.865806Z qemu-kvm: -fw_cfg name=opt/com.coreos/config,file=/home/user/VirtualMachines/etcd.ign: can't load /home/user/VirtualMachines/etcd.ign') -``` - -[As described here][libvirt_selinux_issues] you can workaround by disabling SELinux, or store the images in a place well-known to work, e.g. by using the default pool. - ### Random domain creation errors due to libvirt race conditon Depending on your libvirt version you might encounter [a race condition][bugzilla_libvirt_race] leading to an error similar to: diff --git a/pkg/asset/machines/libvirt/machines.go b/pkg/asset/machines/libvirt/machines.go index 2392bd9700c..2fe47b340b4 100644 --- a/pkg/asset/machines/libvirt/machines.go +++ b/pkg/asset/machines/libvirt/machines.go @@ -69,7 +69,7 @@ func provider(clusterID string, networkInterfaceAddress string, platform *libvir UserDataSecret: userDataSecret, }, Volume: &libvirtprovider.Volume{ - PoolName: "default", + PoolName: clusterID, BaseVolumeID: fmt.Sprintf("%s-base", clusterID), }, NetworkInterfaceName: clusterID, diff --git a/pkg/terraform/exec/plugins/Gopkg.lock b/pkg/terraform/exec/plugins/Gopkg.lock index 236c5931435..4e9446b5197 100644 --- a/pkg/terraform/exec/plugins/Gopkg.lock +++ b/pkg/terraform/exec/plugins/Gopkg.lock @@ -382,14 +382,14 @@ version = "v1.1.0" [[projects]] - digest = "1:ed022e6cec96994a6cc5fcdd73f1c85674a802ba872f6608a3849f8e9cc144bf" + digest = "1:1e1c7b87d9c9d66cc9bc11e96924ec0016a20f427f001106f1eed659f51704aa" name = 
"github.com/dmacvicar/terraform-provider-libvirt" packages = [ "libvirt", "libvirt/helper/suppress", ] pruneopts = "NUT" - revision = "5417057da4ea76505889ce96e762bdc36dd6894e" + revision = "c0e46b59df8718cdd905b1a3fb9738b0d4905143" [[projects]] branch = "master" diff --git a/pkg/terraform/exec/plugins/Gopkg.toml b/pkg/terraform/exec/plugins/Gopkg.toml index addedbd6cba..93a7eecd02d 100644 --- a/pkg/terraform/exec/plugins/Gopkg.toml +++ b/pkg/terraform/exec/plugins/Gopkg.toml @@ -11,7 +11,7 @@ ignored = [ [[constraint]] name = "github.com/dmacvicar/terraform-provider-libvirt" - revision = "5417057da4ea76505889ce96e762bdc36dd6894e" + revision = "c0e46b59df8718cdd905b1a3fb9738b0d4905143" [[constraint]] name = "github.com/terraform-providers/terraform-provider-aws" diff --git a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/domain.go b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/domain.go index cc63339e7c8..4a1cb3445ac 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/domain.go @@ -390,19 +390,37 @@ func setConsoles(d *schema.ResourceData, domainDef *libvirtxml.Domain) { Port: &consoleTargetPort, } } - if sourcePath, ok := d.GetOk(prefix + ".source_path"); ok { - console.Source = &libvirtxml.DomainChardevSource{ - Dev: &libvirtxml.DomainChardevSourceDev{ - Path: sourcePath.(string), - }, - } - } if targetType, ok := d.GetOk(prefix + ".target_type"); ok { if console.Target == nil { console.Target = &libvirtxml.DomainConsoleTarget{} } console.Target.Type = targetType.(string) } + switch d.Get(prefix + ".type").(string) { + case "tcp": + sourceHost := d.Get(prefix + ".source_host") + sourceService := d.Get(prefix + ".source_service") + console.Source = &libvirtxml.DomainChardevSource{ + TCP: &libvirtxml.DomainChardevSourceTCP{ 
+ Mode: "bind", + Host: sourceHost.(string), + Service: sourceService.(string), + }, + } + console.Protocol = &libvirtxml.DomainChardevProtocol{ + Type: "telnet", + } + case "pty": + fallthrough + default: + if sourcePath, ok := d.GetOk(prefix + ".source_path"); ok { + console.Source = &libvirtxml.DomainChardevSource{ + Dev: &libvirtxml.DomainChardevSourceDev{ + Path: sourcePath.(string), + }, + } + } + } domainDef.Devices.Consoles = append(domainDef.Devices.Consoles, console) } } @@ -500,7 +518,16 @@ func setDisks(d *schema.ResourceData, domainDef *libvirtxml.Domain, virConn *lib if strings.HasSuffix(url.Path, ".iso") { disk.Device = "cdrom" + disk.Target = &libvirtxml.DomainDiskTarget{ + Dev: fmt.Sprintf("hd%s", diskLetterForIndex(numOfISOs)), + Bus: "ide", + } + disk.Driver = &libvirtxml.DomainDiskDriver{ + Name: "qemu", + } + numOfISOs++ } + if !strings.HasSuffix(url.Path, ".qcow2") { disk.Driver.Type = "raw" } @@ -525,6 +552,10 @@ func setDisks(d *schema.ResourceData, domainDef *libvirtxml.Domain, virConn *lib numOfISOs++ } + + if !strings.HasSuffix(file.(string), ".qcow2") { + disk.Driver.Type = "raw" + } } domainDef.Devices.Disks = append(domainDef.Devices.Disks, disk) diff --git a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/pool.go b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/pool.go new file mode 100644 index 00000000000..e5c5b566d7e --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/pool.go @@ -0,0 +1,115 @@ +package libvirt + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + libvirt "github.com/libvirt/libvirt-go" +) + +const ( + poolExistsID = "EXISTS" + poolNotExistsID = "NOT-EXISTS" +) + +// poolExists returns "EXISTS" or "NOT-EXISTS" depending on the current pool existence +func poolExists(virConn *libvirt.Connect, uuid string) resource.StateRefreshFunc { + 
return func() (interface{}, string, error) { + pool, err := virConn.LookupStoragePoolByUUIDString(uuid) + if err != nil { + if err.(libvirt.Error).Code == libvirt.ERR_NO_STORAGE_POOL { + log.Printf("Pool %s does not exist", uuid) + return virConn, "NOT-EXISTS", nil + } + log.Printf("Pool %s: error: %s", uuid, err.(libvirt.Error).Message) + } + if pool != nil { + defer pool.Free() + } + return virConn, poolExistsID, err + } +} + +// poolWaitForExists waits for a storage pool to be up and timeout after 5 minutes. +func poolWaitForExists(virConn *libvirt.Connect, uuid string) error { + log.Printf("Waiting for pool %s to be active...", uuid) + stateConf := &resource.StateChangeConf{ + Pending: []string{poolNotExistsID}, + Target: []string{poolExistsID}, + Refresh: poolExists(virConn, uuid), + Timeout: 1 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + log.Printf("%s", err) + return fmt.Errorf("unexpected error during pool creation operation. The operation did not complete successfully") + } + return nil +} + +// poolWaitDeleted waits for a storage pool to be removed +func poolWaitDeleted(virConn *libvirt.Connect, uuid string) error { + log.Printf("Waiting for pool %s to be deleted...", uuid) + stateConf := &resource.StateChangeConf{ + Pending: []string{poolExistsID}, + Target: []string{poolNotExistsID}, + Refresh: poolExists(virConn, uuid), + Timeout: 1 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + log.Printf("%s", err) + return fmt.Errorf("unexpected error during pool destroy operation. 
The pool was not deleted") + } + return nil +} + +// deletePool deletes the pool identified by `uuid` from libvirt +func deletePool(client *Client, uuid string) error { + virConn := client.libvirt + if virConn == nil { + return fmt.Errorf(LibVirtConIsNil) + } + + pool, err := virConn.LookupStoragePoolByUUIDString(uuid) + if err != nil { + return fmt.Errorf("error retrieving storage pool info: %s", err) + } + + poolName, err := pool.GetName() + if err != nil { + return fmt.Errorf("error retrieving storage pool name: %s", err) + } + client.poolMutexKV.Lock(poolName) + defer client.poolMutexKV.Unlock(poolName) + + info, err := pool.GetInfo() + if err != nil { + return fmt.Errorf("error retrieving storage pool info: %s", err) + } + + if info.State != libvirt.STORAGE_POOL_INACTIVE { + err := pool.Destroy() + if err != nil { + return fmt.Errorf("error deleting storage pool: %s", err) + } + } + + err = pool.Delete(0) + if err != nil { + return fmt.Errorf("error deleting storage pool: %s", err) + } + + err = pool.Undefine() + if err != nil { + return fmt.Errorf("error deleting storage pool: %s", err) + } + + return poolWaitDeleted(client.libvirt, uuid) +} diff --git a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/provider.go b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/provider.go index e27afceb86e..bf2ce19fc21 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/provider.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/provider.go @@ -23,6 +23,7 @@ func Provider() terraform.ResourceProvider { "libvirt_domain": resourceLibvirtDomain(), "libvirt_volume": resourceLibvirtVolume(), "libvirt_network": resourceLibvirtNetwork(), + "libvirt_pool": resourceLibvirtPool(), "libvirt_cloudinit_disk": resourceCloudInitDisk(), "libvirt_ignition": resourceIgnition(), }, diff --git 
a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_domain.go b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_domain.go index 7bf63e3ff9c..f56c33f940a 100644 --- a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_domain.go +++ b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_domain.go @@ -277,6 +277,18 @@ func resourceLibvirtDomain() *schema.Resource { Optional: true, ForceNew: true, }, + "source_host": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "127.0.0.1", + }, + "source_service": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "0", + }, "target_port": { Type: schema.TypeString, Required: true, diff --git a/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_pool.go b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_pool.go new file mode 100644 index 00000000000..6012a8cbf31 --- /dev/null +++ b/pkg/terraform/exec/plugins/vendor/github.com/dmacvicar/terraform-provider-libvirt/libvirt/resource_libvirt_pool.go @@ -0,0 +1,255 @@ +package libvirt + +import ( + "encoding/xml" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + libvirt "github.com/libvirt/libvirt-go" + "github.com/libvirt/libvirt-go-xml" +) + +func resourceLibvirtPool() *schema.Resource { + return &schema.Resource{ + Create: resourceLibvirtPoolCreate, + Read: resourceLibvirtPoolRead, + Delete: resourceLibvirtPoolDelete, + Exists: resourceLibvirtPoolExists, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "capacity": { + Type: 
schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "allocation": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + "available": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "xml": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "xslt": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + // Dir-specific attributes + "path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + } +} + +func resourceLibvirtPoolCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Client) + if client.libvirt == nil { + return fmt.Errorf(LibVirtConIsNil) + } + + poolType := d.Get("type").(string) + if poolType != "dir" { + return fmt.Errorf("Only storage pools of type \"dir\" are supported") + } + + poolName := d.Get("name").(string) + + client.poolMutexKV.Lock(poolName) + defer client.poolMutexKV.Unlock(poolName) + + // Check whether the storage pool already exists. Its name needs to be + // unique. 
+ if _, err := client.libvirt.LookupStoragePoolByName(poolName); err == nil { + return fmt.Errorf("storage pool '%s' already exists", poolName) + } + log.Printf("[DEBUG] Pool with name '%s' does not exist yet", poolName) + + poolPath := d.Get("path").(string) + if poolPath == "" { + return fmt.Errorf("\"path\" attribute is required for storage pools of type \"dir\"") + } + + poolDef := libvirtxml.StoragePool{ + Type: "dir", + Name: poolName, + Target: &libvirtxml.StoragePoolTarget{ + Path: poolPath, + }, + } + data, err := xmlMarshallIndented(poolDef) + if err != nil { + return fmt.Errorf("Error serializing libvirt storage pool: %s", err) + } + log.Printf("[DEBUG] Generated XML for libvirt storage pool:\n%s", data) + + data, err = transformResourceXML(data, d) + if err != nil { + return fmt.Errorf("Error applying XSLT stylesheet: %s", err) + } + + // create the pool + pool, err := client.libvirt.StoragePoolDefineXML(data, 0) + if err != nil { + return fmt.Errorf("Error creating libvirt storage pool: %s", err) + } + defer pool.Free() + + err = pool.Build(0) + if err != nil { + return fmt.Errorf("Error building libvirt storage pool: %s", err) + } + + err = pool.SetAutostart(true) + if err != nil { + return fmt.Errorf("Error setting up libvirt storage pool: %s", err) + } + + err = pool.Create(0) + if err != nil { + return fmt.Errorf("Error starting libvirt storage pool: %s", err) + } + + err = pool.Refresh(0) + if err != nil { + return fmt.Errorf("Error refreshing libvirt storage pool: %s", err) + } + + id, err := pool.GetUUIDString() + if err != nil { + return fmt.Errorf("Error retrieving libvirt pool id: %s", err) + } + d.SetId(id) + + // make sure we record the id even if the rest of this gets interrupted + d.Partial(true) + d.Set("id", id) + d.SetPartial("id") + d.Partial(false) + + log.Printf("[INFO] Pool ID: %s", d.Id()) + + if err := poolWaitForExists(client.libvirt, id); err != nil { + return err + } + + return resourceLibvirtPoolRead(d, meta) +} + +func 
resourceLibvirtPoolRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Client) + virConn := client.libvirt + if virConn == nil { + return fmt.Errorf(LibVirtConIsNil) + } + + pool, err := virConn.LookupStoragePoolByUUIDString(d.Id()) + if pool == nil { + log.Printf("storage pool '%s' may have been deleted outside Terraform", d.Id()) + d.SetId("") + return nil + } + defer pool.Free() + + poolName, err := pool.GetName() + if err != nil { + return fmt.Errorf("error retrieving pool name: %s", err) + } + d.Set("name", poolName) + + info, err := pool.GetInfo() + if err != nil { + return fmt.Errorf("error retrieving pool info: %s", err) + } + d.Set("capacity", info.Capacity) + d.Set("allocation", info.Allocation) + d.Set("available", info.Available) + + poolDefXML, err := pool.GetXMLDesc(0) + if err != nil { + return fmt.Errorf("could not get XML description for pool %s: %s", poolName, err) + } + + var poolDef libvirtxml.StoragePool + err = xml.Unmarshal([]byte(poolDefXML), &poolDef) + if err != nil { + return fmt.Errorf("could not get a pool definition from XML for %s: %s", poolDef.Name, err) + } + + var poolPath string + if poolDef.Target != nil && poolDef.Target.Path != "" { + poolPath = poolDef.Target.Path + } + + if poolPath == "" { + log.Printf("Pool %s has no path specified", poolName) + } else { + log.Printf("[DEBUG] Pool %s path: %s", poolName, poolPath) + d.Set("path", poolPath) + } + + return nil +} + +func resourceLibvirtPoolDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*Client) + if client.libvirt == nil { + return fmt.Errorf(LibVirtConIsNil) + } + + return deletePool(client, d.Id()) +} + +func resourceLibvirtPoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + log.Printf("[DEBUG] Check if resource libvirt_pool exists") + client := meta.(*Client) + virConn := client.libvirt + if virConn == nil { + return false, fmt.Errorf(LibVirtConIsNil) + } + + pool, err := 
virConn.LookupStoragePoolByUUIDString(d.Id()) + if err != nil { + virErr := err.(libvirt.Error) + if virErr.Code != libvirt.ERR_NO_STORAGE_POOL { + return false, fmt.Errorf("Can't retrieve pool %s", d.Id()) + } + // does not exist, but no error + return false, nil + } + defer pool.Free() + + return true, nil +}