update retry strategy #743

Merged · 15 commits · Feb 6, 2024
Changes from 13 commits
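The diff below changes the mass deployer's retry strategy: instead of cancelling every contract as soon as one batch fails, each node group is retried with retry.Do, partial failures are aggregated with hashicorp/go-multierror, and later attempts re-deploy only the networks and VMs that never received a contract ID. The snippet that follows is not code from the PR; it is a minimal, self-contained sketch of that retry-and-aggregate pattern, assuming the standard github.com import paths for sethvargo/go-retry and hashicorp/go-multierror (deployGroup, deployNetworks, deployVMs, and maxRetries are illustrative placeholders, not identifiers from this repository).

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/go-multierror"
	"github.com/sethvargo/go-retry"
)

// deployNetworks and deployVMs stand in for the batch deploy calls; here one
// of them always fails so the retry path is exercised.
func deployNetworks(ctx context.Context) error { return nil }

func deployVMs(ctx context.Context) error { return errors.New("vm deployment failed") }

// deployGroup is a single attempt: try every batch, collect the failures with
// go-multierror, and report them together instead of stopping at the first error.
func deployGroup(ctx context.Context) error {
	var multiErr error
	if err := deployNetworks(ctx); err != nil {
		multiErr = multierror.Append(multiErr, err)
	}
	if err := deployVMs(ctx); err != nil {
		multiErr = multierror.Append(multiErr, err)
	}
	return multiErr
}

func main() {
	ctx := context.Background()
	const maxRetries = 3

	// Constant, near-zero backoff capped at maxRetries attempts, mirroring
	// retry.WithMaxRetries(cfg.MaxRetries, retry.NewConstant(1*time.Nanosecond)) in the diff.
	backoff := retry.WithMaxRetries(maxRetries, retry.NewConstant(time.Nanosecond))

	err := retry.Do(ctx, backoff, func(ctx context.Context) error {
		if err := deployGroup(ctx); err != nil {
			// Wrapping with RetryableError asks retry.Do for another attempt;
			// the real deployer would redo only the pieces that failed.
			return retry.RetryableError(err)
		}
		return nil
	})
	if err != nil {
		// err aggregates whatever was still failing after the last attempt.
		fmt.Println(err)
	}
}
```

Returning a RetryableError from the callback is what makes retry.Do try again; the near-zero constant backoff mirrors the PR's choice to retry immediately and rely on cfg.MaxRetries as the only limit.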
4 changes: 1 addition & 3 deletions go.work.sum
@@ -1278,8 +1278,7 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.14.3/go.mod h1:3WXPzbXEEliJ+a6UFE4vhIxV8qR1EML6ngzP9ug4eYg=
github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo=
github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -1618,7 +1617,6 @@ golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
2 changes: 1 addition & 1 deletion mass-deployer/go.mod
@@ -6,6 +6,7 @@ require (
github.com/cosmos/go-bip39 v1.0.0
github.com/gliderlabs/ssh v0.3.6
github.com/go-playground/validator/v10 v10.17.0
github.com/hashicorp/go-multierror v1.1.1
github.com/rs/zerolog v1.32.0
github.com/sethvargo/go-retry v0.2.4
github.com/spf13/cobra v1.8.0
@@ -42,7 +43,6 @@ require (
github.com/gtank/merlin v0.1.1 // indirect
github.com/gtank/ristretto255 v0.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-base58 v0.0.0-20150317085156-6237cf65f3a6 // indirect
145 changes: 95 additions & 50 deletions mass-deployer/pkg/mass-deployer/deployer.go
@@ -10,6 +10,7 @@ import (
"sync"
"time"

"github.com/hashicorp/go-multierror"
"github.com/sethvargo/go-retry"
"gopkg.in/yaml.v3"

@@ -38,25 +39,31 @@ func RunDeployer(ctx context.Context, cfg Config, output string, debug bool) err

for _, nodeGroup := range cfg.NodeGroups {
log.Info().Str("Node group", nodeGroup.Name).Msg("Running deployment")
var groupDeployments groupDeploymentsInfo
trial := 1

if err := retry.Do(ctx, retry.WithMaxRetries(cfg.MaxRetries, retry.NewConstant(1*time.Nanosecond)), func(ctx context.Context) error {
if trial != 1 {
log.Info().Str("Node group", nodeGroup.Name).Int("Deployment trial", trial).Msg("Retrying to deploy")
}

info, err := deployNodeGroup(ctx, tfPluginClient, nodeGroup, cfg.Vms, cfg.SSHKeys)
info, err := deployNodeGroup(ctx, tfPluginClient, &groupDeployments, nodeGroup, cfg.Vms, cfg.SSHKeys)
if err != nil {
trial++
log.Debug().Err(err).Str("Node group", nodeGroup.Name).Msg("failed to deploy")
return retry.RetryableError(err)
}

passedGroups[nodeGroup.Name] = info
log.Info().Str("Node group", nodeGroup.Name).Msg("Done deploying")
passedGroups[nodeGroup.Name] = info
return nil
}); err != nil {

failedGroups[nodeGroup.Name] = err.Error()
err := tfPluginClient.CancelByProjectName(nodeGroup.Name)
if err != nil {
log.Debug().Err(err).Send()
}
}
}

@@ -85,24 +92,32 @@ func RunDeployer(ctx context.Context, cfg Config, output string, debug bool) err
return os.WriteFile(output, outputBytes, 0644)
}

func deployNodeGroup(ctx context.Context, tfPluginClient deployer.TFPluginClient, nodeGroup NodesGroup, vms []Vms, sshKeys map[string]string) ([]vmOutput, error) {
func deployNodeGroup(ctx context.Context, tfPluginClient deployer.TFPluginClient, groupDeployments *groupDeploymentsInfo, nodeGroup NodesGroup, vms []Vms, sshKeys map[string]string) ([]vmOutput, error) {
log.Info().Str("Node group", nodeGroup.Name).Msg("Filter nodes")
nodesIDs, err := filterNodes(ctx, tfPluginClient, nodeGroup)
if err != nil {
return nil, err
}
log.Debug().Ints("nodes IDs", nodesIDs).Send()

log.Debug().Str("Node group", nodeGroup.Name).Msg("Parsing vms group")
groupsDeployments := parseVMsGroup(vms, nodeGroup.Name, nodesIDs, sshKeys)
if groupDeployments.networkDeployments == nil {
log.Debug().Str("Node group", nodeGroup.Name).Msg("Parsing vms group")
*groupDeployments = parseVMsGroup(vms, nodeGroup.Name, nodesIDs, sshKeys)
} else {
log.Debug().Str("Node group", nodeGroup.Name).Msg("Updating vms group")
updateFailedDeployments(ctx, tfPluginClient, nodesIDs, groupDeployments)
}

log.Info().Str("Node group", nodeGroup.Name).Msg("Starting mass deployment")
info, err := massDeploy(ctx, tfPluginClient, groupsDeployments)
err = massDeploy(ctx, tfPluginClient, groupDeployments)
if err != nil {
return nil, err
}

return info, nil
log.Debug().Msg("Load deployments")
vmsInfo, err := loadGroupInfo(tfPluginClient, groupDeployments.vmDeployments)

return vmsInfo, err
}

func parseVMsGroup(vms []Vms, nodeGroup string, nodesIDs []int, sshKeys map[string]string) groupDeploymentsInfo {
@@ -117,31 +132,27 @@ func parseVMsGroup(vms []Vms, nodeGroup string, nodesIDs []int, sshKeys map[stri
return buildDeployments(vmsOfNodeGroup, nodeGroup, nodesIDs, sshKeys)
}

func massDeploy(ctx context.Context, tfPluginClient deployer.TFPluginClient, deployments groupDeploymentsInfo) ([]vmOutput, error) {
log.Debug().Msg("Deploy networks")
err := tfPluginClient.NetworkDeployer.BatchDeploy(ctx, deployments.networkDeployments)
if err != nil {
cancelContractsOfFailedDeployments(tfPluginClient, deployments.networkDeployments, []*workloads.Deployment{})
return nil, err
func massDeploy(ctx context.Context, tfPluginClient deployer.TFPluginClient, deployments *groupDeploymentsInfo) error {
// deploy only contracts that need to be deployed
networks, vms := getNotDeployedDeployments(tfPluginClient, deployments)
var multiErr error

log.Debug().Msg(fmt.Sprintf("Deploying %d networks, this may to take a while", len(deployments.networkDeployments)))
if err := tfPluginClient.NetworkDeployer.BatchDeploy(ctx, networks); err != nil {
multiErr = multierror.Append(multiErr, err)
}

log.Debug().Msg(fmt.Sprintf("Deploying %d virtual machines, this may to take a while", len(deployments.vmDeployments)))
err = tfPluginClient.DeploymentDeployer.BatchDeploy(ctx, deployments.vmDeployments)
if err != nil {
cancelContractsOfFailedDeployments(tfPluginClient, deployments.networkDeployments, deployments.vmDeployments)
return nil, err
if err := tfPluginClient.DeploymentDeployer.BatchDeploy(ctx, vms); err != nil {
multiErr = multierror.Append(multiErr, err)
}

log.Debug().Msg("Load deployments")
vmsInfo := loadDeploymentsInfo(tfPluginClient, deployments.deploymentsInfo)

return vmsInfo, nil
return multiErr
}

func buildDeployments(vms []Vms, nodeGroup string, nodesIDs []int, sshKeys map[string]string) groupDeploymentsInfo {
var vmDeployments []*workloads.Deployment
var networkDeployments []*workloads.ZNet
var deploymentsInfo []vmDeploymentInfo
var nodesIDsIdx int

// here we loop over all groups of vms within the same node group, and for every group
@@ -196,54 +207,53 @@ func buildDeployments(vms []Vms, nodeGroup string, nodesIDs []int, sshKeys map[s

vmDeployments = append(vmDeployments, &deployment)
networkDeployments = append(networkDeployments, &network)
deploymentsInfo = append(deploymentsInfo, vmDeploymentInfo{nodeID: nodeID, deploymentName: deployment.Name, vmName: vm.Name})
}
}
return groupDeploymentsInfo{vmDeployments: vmDeployments, networkDeployments: networkDeployments, deploymentsInfo: deploymentsInfo}
return groupDeploymentsInfo{vmDeployments: vmDeployments, networkDeployments: networkDeployments}
}

func cancelContractsOfFailedDeployments(tfPluginClient deployer.TFPluginClient, networkDeployments []*workloads.ZNet, vmDeployments []*workloads.Deployment) {
contracts := []uint64{}
for _, network := range networkDeployments {
for _, contract := range network.NodeDeploymentID {
if contract != 0 {
contracts = append(contracts, contract)
}
func getNotDeployedDeployments(tfPluginClient deployer.TFPluginClient, groupDeployments *groupDeploymentsInfo) ([]*workloads.ZNet, []*workloads.Deployment) {
var failedVmDeployments []*workloads.Deployment
var failedNetworkDeployments []*workloads.ZNet

for i := range groupDeployments.networkDeployments {
if isFailedNetwork(*groupDeployments.networkDeployments[i]) {
failedNetworkDeployments = append(failedNetworkDeployments, groupDeployments.networkDeployments[i])
}
}

for _, vm := range vmDeployments {
if vm.ContractID != 0 {
contracts = append(contracts, vm.ContractID)
if groupDeployments.vmDeployments[i].ContractID == 0 {
failedVmDeployments = append(failedVmDeployments, groupDeployments.vmDeployments[i])
}
}

err := tfPluginClient.BatchCancelContract(contracts)
if err != nil {
log.Debug().Err(err)
}

return failedNetworkDeployments, failedVmDeployments
}

func loadDeploymentsInfo(tfPluginClient deployer.TFPluginClient, deployments []vmDeploymentInfo) []vmOutput {
func loadGroupInfo(tfPluginClient deployer.TFPluginClient, vmDeployments []*workloads.Deployment) ([]vmOutput, error) {
vmsInfo := []vmOutput{}
var multiErr error
var lock sync.Mutex
var wg sync.WaitGroup

for _, info := range deployments {
for _, deployment := range vmDeployments {
wg.Add(1)

go func(depInfo vmDeploymentInfo) {
go func(deployment workloads.Deployment) {
defer wg.Done()
log.Debug().
Str("vm", depInfo.vmName).
Str("vm", deployment.Name).
Msg("loading vm info from state")

vmDeployment, err := tfPluginClient.State.LoadDeploymentFromGrid(depInfo.nodeID, depInfo.deploymentName)
vmDeployment, err := tfPluginClient.State.LoadDeploymentFromGrid(deployment.NodeID, deployment.Name)
if err != nil {
lock.Lock()
defer lock.Unlock()
multiErr = multierror.Append(multiErr, err)
log.Debug().Err(err).
Str("vm", depInfo.vmName).
Str("deployment", depInfo.deploymentName).
Uint32("node ID", depInfo.nodeID).
Str("vm", deployment.Vms[0].Name).
Str("deployment", deployment.Name).
Uint32("node ID", deployment.NodeID).
Msg("couldn't load from state")
return
}
@@ -264,11 +274,11 @@ func loadDeploymentsInfo(tfPluginClient deployer.TFPluginClient, deployments []v
lock.Lock()
defer lock.Unlock()
vmsInfo = append(vmsInfo, vmInfo)
}(info)
}(*deployment)
}

wg.Wait()
return vmsInfo

return vmsInfo, multiErr
}

func parseDisks(name string, disks []Disk) (disksWorkloads []workloads.Disk, mountsWorkloads []workloads.Mount) {
@@ -283,3 +293,38 @@ func parseDisks(name string, disks []Disk) (disksWorkloads []workloads.Disk, mou
}
return
}

func updateFailedDeployments(ctx context.Context, tfPluginClient deployer.TFPluginClient, nodesIDs []int, groupDeployments *groupDeploymentsInfo) {
var contractsToBeCanceled []*workloads.ZNet
for idx, network := range groupDeployments.networkDeployments {
if groupDeployments.vmDeployments[idx].ContractID == 0 {
contractsToBeCanceled = append(contractsToBeCanceled, network)
}
}

err := tfPluginClient.NetworkDeployer.BatchCancel(ctx, contractsToBeCanceled)
if err != nil {
log.Debug().Err(err).Send()
}

for idx, deployment := range groupDeployments.vmDeployments {
if deployment.ContractID == 0 || isFailedNetwork(*groupDeployments.networkDeployments[idx]) {
nodeID := uint32(nodesIDs[idx%len(nodesIDs)])
groupDeployments.vmDeployments[idx].NodeID = nodeID
groupDeployments.networkDeployments[idx].Nodes = []uint32{nodeID}
}
}
}

func isFailedNetwork(network workloads.ZNet) bool {
if len(network.NodeDeploymentID) == 0 {
return true
} else {
for _, contract := range network.NodeDeploymentID {
if contract == 0 {
return true
}
}
}
return false
}
7 changes: 0 additions & 7 deletions mass-deployer/pkg/mass-deployer/types.go
@@ -52,13 +52,6 @@ type Disk struct {
type groupDeploymentsInfo struct {
vmDeployments []*workloads.Deployment
networkDeployments []*workloads.ZNet
deploymentsInfo []vmDeploymentInfo
}

type vmDeploymentInfo struct {
nodeID uint32
vmName string
deploymentName string
}

type vmOutput struct {