Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[WIP] Migrate to the new KubeOneCluster API #382

Closed
wants to merge 15 commits into from
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"

"github.com/kubermatic/kubeone/pkg/config"
kubeoneapi "github.com/kubermatic/kubeone/pkg/apis/kubeone"
"github.com/kubermatic/kubeone/pkg/installer/util"
"github.com/kubermatic/kubeone/pkg/ssh"

Expand Down
81 changes: 33 additions & 48 deletions config.yaml.dist
Original file line number Diff line number Diff line change
Expand Up @@ -15,24 +15,23 @@
# This file contains the configuration for installing a single Kubernetes
# cluster using KubeOne. You can augment some options by providing
# Terraform output at runtime, as explained in the documentation.

apiVersion: kubeone.io/v1alpha1
kind: KubeOneCluster
name: my-demo-cluster

versions:
kubernetes: "1.14.1"

network:
clusterNetwork:
# the subnet used for pods (flannel);
# leave it empty for default: 10.244.0.0/16
pod_subnet: ""

podSubnet: ""
# the subnet used for services;
# leave it empty for default: 10.96.0.0/12
service_subnet: ""

serviceSubnet: ""
# a nodePort range to reserve for services;
# leave it empty for default: 30000-32767
node_port_range: ""
nodePortRange: ""

provider:
# Supported cloud provider names:
Expand All @@ -43,119 +42,105 @@ provider:
# * openstack
# * vsphere
name: ""

# Set kubelet flag --cloud-provider=external, to be used with external
# Cloud Controller Managers (CCM).
external: false

# Path to file that will be uploaded and used as custom --cloud-config file.
cloud_config: ""

features:
# Enables PodSecurityPolicy admission plugin in API server, as well as creates
# default `privileged` PodSecurityPolicy, plus RBAC rules to authorize
# `kube-system` namespace pods to `use` it.
pod_security_policy:
podSecurityPolicy:
enable: false

# Enables dynamic audit logs.
# After enabling this, the operator should create an auditregistration.k8s.io/v1alpha1
# AuditSink object.
# More info: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#dynamic-backend
dynamic_audit_log:
dynamicAuditLog:
enable: false

# Opt-out from deploying metrics-server
# more info: https://github.com/kubernetes-incubator/metrics-server
metrics_server:
metricsServer:
# enabled by default
enable: true

# Enable OpenID-Connect support in API server
# More info: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens
openid_connect:
openidConnect:
enable: false

config:
# The URL of the OpenID issuer, only HTTPS scheme will be accepted. If
# set, it will be used to verify the OIDC JSON Web Token (JWT).
issuer_url: ""

issuerUrl: ""
# The client ID for the OpenID Connect client, must be set if
# issuerUrl is set.
client_id: "kubernetes"

clientId: "kubernetes"
# The OpenID claim to use as the user name. Note that claims other than
# the default ('sub') is not guaranteed to be unique and immutable. This
# flag is experimental in kubernetes, please see the kubernetes
# authentication documentation for further details.
username_claim: "sub"

usernameClaim: "sub"
# If provided, all usernames will be prefixed with this value. If not
# provided, username claims other than 'email' are prefixed by the issuer
# URL to avoid clashes. To skip any prefixing, provide the value '-'.
username_prefix: "oidc:"

usernamePrefix: "oidc:"
# If provided, the name of a custom OpenID Connect claim for specifying
# user groups. The claim value is expected to be a string or array of
# strings. This flag is experimental in kubernetes, please see the
# kubernetes authentication documentation for further details.
groups_claim: "groups"

groupsClaim: "groups"
# If provided, all groups will be prefixed with this value to prevent
# conflicts with other authentication strategies.
groups_prefix: "oidc:"

groupsPrefix: "oidc:"
# Comma-separated list of allowed JOSE asymmetric signing algorithms. JWTs
# with a 'alg' header value not in this list will be rejected. Values are
# defined by RFC 7518 https://tools.ietf.org/html/rfc7518#section-3.1.
signing_algs: "RS256"

signingAlgs: "RS256"
# A key=value pair that describes a required claim in the ID Token. If
# set, the claim is verified to be present in the ID Token with a matching
# value. Only single pair is currently supported.
required_claim: ""

requiredClaim: ""
# If set, the OpenID server's certificate will be verified by one of the
# authorities in the oidc-ca-file, otherwise the host's root CA set will
# be used.
ca_file: ""
caFile: ""

# The list of nodes can be overwritten by providing Terraform output.
# You are strongly encouraged to provide an odd number of nodes and
# have at least three of them.
# Remember to only specify your *master* nodes.
# hosts:
# - public_address: '1.2.3.4'
# private_address: '172.18.0.1'
# ssh_port: 22 # can be left out if using the default (22)
# ssh_username: ubuntu

# # Uou usually want to configure either a private key OR an
# - publicAddress: '1.2.3.4'
# privateAddress: '172.18.0.1'
# sshPort: 22 # can be left out if using the default (22)
# sshUsername: ubuntu
# # You usually want to configure either a private key OR an
# # agent socket, but never both. The socket value can be
# # prefixed with "env:" to refer to an environment variable.
# ssh_private_key_file: '/home/me/.ssh/id_rsa'
# ssh_agent_socket: 'env:SSH_AUTH_SOCK'
# sshPrivateKeyFile: '/home/me/.ssh/id_rsa'
# sshAgentSocket: 'env:SSH_AUTH_SOCK'

# The API server can also be overwritten by Terraform. Provide the
# external address of your load balancer or the public address of
# your first node.
# apiserver:
# address: '1.2.3.4'
# apiEndpoints:
# - host: '1.2.3.4'
# port: 6443

# If the cluster runs on bare metal or an unsupported cloud provider,
# you can disable the machine-controller deployment entirely. In this
# case, anything you configure in your "workers" sections is ignored.
#machine_controller:
#machineController:
# deploy: false

# Proxy is used to configure HTTP_PROXY, HTTPS_PROXY and NO_PROXY
# for Docker daemon and kubelet, and to be used when provisioning cluster
# (e.g. for curl, apt-get..).
# proxy:
# http_proxy: 'http://1.2.3.4'
# https_proxy: 'https://1.2.3.4'
# no_proxy: '1.2.3.4'
# http: 'http://1.2.3.4'
# https: 'https://1.2.3.4'
# noProxy: '1.2.3.4'

# KubeOne can automatically create MachineDeployments to create
# worker nodes in your cluster. Each element in this "workers"
Expand Down
4 changes: 2 additions & 2 deletions pkg/certificate/ca.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,14 @@ package certificate
import (
"github.com/pkg/errors"

"github.com/kubermatic/kubeone/pkg/config"
kubeoneapi "github.com/kubermatic/kubeone/pkg/apis/kubeone"
"github.com/kubermatic/kubeone/pkg/ssh"
"github.com/kubermatic/kubeone/pkg/util"
)

// DownloadCA grabs CA certs/keys from leader host
func DownloadCA(ctx *util.Context) error {
return ctx.RunTaskOnLeader(func(ctx *util.Context, _ *config.HostConfig, conn ssh.Connection) error {
return ctx.RunTaskOnLeader(func(ctx *util.Context, _ kubeoneapi.HostConfig, conn ssh.Connection) error {
_, _, err := ctx.Runner.Run(`
mkdir -p ./{{ .WORK_DIR }}/pki/etcd
sudo cp /etc/kubernetes/pki/ca.crt ./{{ .WORK_DIR }}/pki/
Expand Down
17 changes: 6 additions & 11 deletions pkg/cmd/install.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"

"github.com/kubermatic/kubeone/pkg/config"
kubeoneapi "github.com/kubermatic/kubeone/pkg/apis/kubeone"
"github.com/kubermatic/kubeone/pkg/installer"
)

Expand Down Expand Up @@ -76,7 +76,7 @@ It's possible to source information about hosts from Terraform output, using the

// runInstall provisions Kubernetes on the provided machines
func runInstall(logger *logrus.Logger, installOptions *installOptions) error {
cluster, err := loadClusterConfig(installOptions.Manifest)
cluster, err := loadClusterConfig(installOptions.Manifest, installOptions.TerraformState)
if err != nil {
return errors.Wrap(err, "failed to load cluster")
}
Expand All @@ -86,22 +86,17 @@ func runInstall(logger *logrus.Logger, installOptions *installOptions) error {
return errors.Wrap(err, "failed to create installer options")
}

if err = applyTerraform(installOptions.TerraformState, cluster); err != nil {
return errors.Wrap(err, "failed to apply Terraform options")
}

if err = cluster.DefaultAndValidate(); err != nil {
return err
}
// TODO(xmudrii): Remove the cluster object output and panic
fmt.Printf("\n\n%#v\n\n", cluster)
//panic(".")

return installer.NewInstaller(cluster, logger).Install(options)
}

func createInstallerOptions(clusterFile string, cluster *config.Cluster, options *installOptions) (*installer.Options, error) {
func createInstallerOptions(clusterFile string, cluster *kubeoneapi.KubeOneCluster, options *installOptions) (*installer.Options, error) {
if len(options.BackupFile) == 0 {
fullPath, _ := filepath.Abs(clusterFile)
clusterName := cluster.Name

options.BackupFile = filepath.Join(filepath.Dir(fullPath), fmt.Sprintf("%s.tar.gz", clusterName))
}

Expand Down
11 changes: 1 addition & 10 deletions pkg/cmd/kubeconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,20 +70,11 @@ func runKubeconfig(kubeconfigOptions *kubeconfigOptions) error {
return errors.New("no cluster config file given")
}

cluster, err := loadClusterConfig(kubeconfigOptions.Manifest)
cluster, err := loadClusterConfig(kubeconfigOptions.Manifest, kubeconfigOptions.TerraformState)
if err != nil {
return errors.Wrap(err, "failed to load cluster")
}

// apply terraform
if err = applyTerraform(kubeconfigOptions.TerraformState, cluster); err != nil {
return err
}

if err = cluster.DefaultAndValidate(); err != nil {
return err
}

kubeconfig, err := util.DownloadKubeconfig(cluster)
if err != nil {
return err
Expand Down
10 changes: 1 addition & 9 deletions pkg/cmd/reset.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,19 +73,11 @@ func runReset(logger *logrus.Logger, resetOptions *resetOptions) error {
return errors.New("no cluster config file given")
}

cluster, err := loadClusterConfig(resetOptions.Manifest)
cluster, err := loadClusterConfig(resetOptions.Manifest, resetOptions.TerraformState)
if err != nil {
return errors.Wrap(err, "failed to load cluster")
}

if err = applyTerraform(resetOptions.TerraformState, cluster); err != nil {
return err
}

if err = cluster.DefaultAndValidate(); err != nil {
return err
}

options := &installer.Options{
Verbose: resetOptions.Verbose,
DestroyWorkers: resetOptions.DestroyWorkers,
Expand Down
49 changes: 6 additions & 43 deletions pkg/cmd/shared.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,12 @@ limitations under the License.
package cmd

import (
"io/ioutil"
"os"

"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"

"github.com/kubermatic/kubeone/pkg/config"
"github.com/kubermatic/kubeone/pkg/terraform"
kubeoneapi "github.com/kubermatic/kubeone/pkg/apis/kubeone"
"github.com/kubermatic/kubeone/pkg/util/config"
)

const (
Expand Down Expand Up @@ -73,44 +69,11 @@ func initLogger(verbose bool) *logrus.Logger {
return logger
}

func loadClusterConfig(filename string) (*config.Cluster, error) {
content, err := ioutil.ReadFile(filename)
func loadClusterConfig(filename, terraformOutputPath string) (*kubeoneapi.KubeOneCluster, error) {
a, err := config.LoadKubeOneCluster(filename, terraformOutputPath)
if err != nil {
return nil, errors.Wrap(err, "failed to read file")
}

cluster := config.Cluster{}
if err := yaml.Unmarshal(content, &cluster); err != nil {
return nil, errors.Wrap(err, "failed to decode file as JSON")
}

return &cluster, nil
}

func applyTerraform(tf string, cluster *config.Cluster) error {
if tf == "" {
return nil
}

var (
tfJSON []byte
err error
)

if tf == "-" {
if tfJSON, err = ioutil.ReadAll(os.Stdin); err != nil {
return errors.Wrap(err, "unable to load Terraform output from stdin")
}
} else {
if tfJSON, err = ioutil.ReadFile(tf); err != nil {
return errors.Wrap(err, "unable to load Terraform output from file")
}
}

var tfConfig *terraform.Config
if tfConfig, err = terraform.NewConfigFromJSON(tfJSON); err != nil {
return errors.Wrap(err, "failed to parse Terraform config")
return nil, errors.Wrap(err, "unable to load a given KubeOneCluster object")
}

return tfConfig.Apply(cluster)
return a, nil
}
11 changes: 1 addition & 10 deletions pkg/cmd/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,21 +71,12 @@ It's possible to source information about hosts from Terraform output, using the

// runUpgrade upgrades Kubernetes on the provided machines
func runUpgrade(logger *logrus.Logger, upgradeOptions *upgradeOptions) error {
cluster, err := loadClusterConfig(upgradeOptions.Manifest)
cluster, err := loadClusterConfig(upgradeOptions.Manifest, upgradeOptions.TerraformState)
if err != nil {
return errors.Wrap(err, "failed to load cluster")
}

options := createUpgradeOptions(upgradeOptions)

if err = applyTerraform(upgradeOptions.TerraformState, cluster); err != nil {
return errors.Wrap(err, "failed to parse terraform state")
}

if err = cluster.DefaultAndValidate(); err != nil {
return err
}

return upgrader.NewUpgrader(cluster, logger).Upgrade(options)
}

Expand Down
Loading