Skip to content
This repository has been archived by the owner on Jan 28, 2022. It is now read-only.

Commit

Permalink
Add support for DBFS Blocks and Clusters (#55)
Browse files Browse the repository at this point in the history
* init commit on cluster code generation

* generate dbfsblock types

* edit sample crd

* change dcluster status type to support float values, generate crd

* fix: the existing-cluster setting is a pointer to bool

* implement logic for dbfs blocks

* add dbfsblock update support

* implemented cluster, improved test coverage

* make byte secret to be base64 encoded

* fix naming convention and add FromDataBricksClusterInfo description
  • Loading branch information
xinsnake authored and Azadehkhojandi committed Jul 29, 2019
1 parent 04c2c3e commit c273324
Show file tree
Hide file tree
Showing 49 changed files with 2,162 additions and 1,241 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ docker-push:
# download controller-gen if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
go get sigs.k8s.io/controller-tools/cmd/[email protected].2
go get sigs.k8s.io/controller-tools/cmd/[email protected].4
CONTROLLER_GEN="$(shell go env GOPATH)/bin/controller-gen"
else
CONTROLLER_GEN="$(shell which controller-gen)"
Expand Down
6 changes: 6 additions & 0 deletions PROJECT
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,9 @@ resources:
- group: databricks
version: v1
kind: Run
- group: databricks
version: v1
kind: Dcluster
- group: databricks
version: v1
kind: DbfsBlock
114 changes: 114 additions & 0 deletions api/v1/dbfsblock_types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
/*
Copyright 2019 microsoft.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
"crypto/sha1"
"encoding/base64"
"fmt"

dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// DbfsBlockSpec defines the desired state of DbfsBlock
type DbfsBlockSpec struct {
	// Path is the target location of the file in DBFS.
	Path string `json:"path,omitempty"`
	// Data is the file content, base64-encoded (GetHash decodes it with
	// base64.StdEncoding before hashing).
	Data string `json:"data,omitempty"`
}

// DbfsBlockStatus defines the observed state of DbfsBlock
type DbfsBlockStatus struct {
	// FileInfo is the file metadata reported by the Databricks API;
	// nil until the block has been submitted (see IsSubmitted).
	FileInfo *dbmodels.FileInfo `json:"file_info,omitempty"`
	// FileHash is the hex SHA-1 of the decoded data, compared against
	// GetHash by IsUpToDate.
	FileHash string `json:"file_hash,omitempty"`
}

// +kubebuilder:object:root=true

// DbfsBlock is the Schema for the dbfsblocks API
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="SHA1SUM",type="string",JSONPath=".status.file_hash"
// +kubebuilder:printcolumn:name="Path",type="string",JSONPath=".status.file_info.path"
// +kubebuilder:printcolumn:name="Size",type="integer",JSONPath=".status.file_info.file_size"
type DbfsBlock struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec and Status are pointers and may be nil; the helper methods on
	// this type guard against nil Status, so callers should too.
	Spec   *DbfsBlockSpec   `json:"spec,omitempty"`
	Status *DbfsBlockStatus `json:"status,omitempty"`
}

// IsBeingDeleted reports whether the API server has marked this object for
// deletion by setting its deletion timestamp.
func (dbfsBlock *DbfsBlock) IsBeingDeleted() bool {
	ts := dbfsBlock.ObjectMeta.DeletionTimestamp
	return !ts.IsZero()
}

// IsSubmitted reports whether the block has been created in DBFS, i.e. the
// status carries file info with a non-empty path.
func (dbfsBlock *DbfsBlock) IsSubmitted() bool {
	status := dbfsBlock.Status
	return status != nil &&
		status.FileInfo != nil &&
		status.FileInfo.Path != ""
}

// IsUpToDate tells you whether the data is up-to-date with the status:
// the hash of the spec data matches the hash recorded in the status.
func (dbfsBlock *DbfsBlock) IsUpToDate() bool {
	status := dbfsBlock.Status
	if status == nil {
		return false
	}
	return dbfsBlock.GetHash() == status.FileHash
}

// GetHash returns the hex-encoded SHA-1 digest of the base64-decoded data
// attribute. It returns "" when the spec is unset or the data is not valid
// base64; callers (IsUpToDate) treat "" as "hash unknown / not up to date".
func (dbfsBlock *DbfsBlock) GetHash() string {
	// Spec is a pointer and may be nil (the sibling methods already guard
	// Status the same way); without this check the method panics.
	if dbfsBlock.Spec == nil {
		return ""
	}
	data, err := base64.StdEncoding.DecodeString(dbfsBlock.Spec.Data)
	if err != nil {
		return ""
	}
	sum := sha1.Sum(data)
	return fmt.Sprintf("%x", sum)
}

// DbfsBlockFinalizerName is the finalizer key managed on DbfsBlock objects
// via HasFinalizer/AddFinalizer/RemoveFinalizer.
const DbfsBlockFinalizerName = "dbfsBlock.finalizers.databricks.microsoft.com"

// HasFinalizer reports whether finalizerName is already recorded on this
// DbfsBlock's metadata.
func (dbfsBlock *DbfsBlock) HasFinalizer(finalizerName string) bool {
	finalizers := dbfsBlock.ObjectMeta.Finalizers
	return containsString(finalizers, finalizerName)
}

// AddFinalizer appends finalizerName to this DbfsBlock's metadata.
func (dbfsBlock *DbfsBlock) AddFinalizer(finalizerName string) {
	finalizers := append(dbfsBlock.ObjectMeta.Finalizers, finalizerName)
	dbfsBlock.ObjectMeta.Finalizers = finalizers
}

// RemoveFinalizer deletes finalizerName from this DbfsBlock's metadata.
func (dbfsBlock *DbfsBlock) RemoveFinalizer(finalizerName string) {
	finalizers := removeString(dbfsBlock.ObjectMeta.Finalizers, finalizerName)
	dbfsBlock.ObjectMeta.Finalizers = finalizers
}

// +kubebuilder:object:root=true

// DbfsBlockList contains a list of DbfsBlock
type DbfsBlockList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of DbfsBlock objects in this page of results.
	Items []DbfsBlock `json:"items"`
}

func init() {
	// Register both the object and its list type with the scheme builder.
	SchemeBuilder.Register(&DbfsBlock{}, &DbfsBlockList{})
}
148 changes: 148 additions & 0 deletions api/v1/dbfsblock_types_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
/*
Copyright 2019 microsoft.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models"

"golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

// These tests are written in BDD-style using Ginkgo framework. Refer to
// http://onsi.github.io/ginkgo to learn more.

var _ = Describe("DbfsBlock", func() {
	var (
		key              types.NamespacedName
		created, fetched *DbfsBlock
	)

	BeforeEach(func() {
		// Add any setup steps that needs to be executed before each test
	})

	AfterEach(func() {
		// Add any teardown steps that needs to be executed after each test
	})

	// Add Tests for OpenAPI validation (or additional CRD features) specified in
	// your API definition.
	// Avoid adding tests for vanilla CRUD operations because they would
	// test Kubernetes API server, which isn't the goal here.
	Context("Create API", func() {

		It("should create an object successfully", func() {

			key = types.NamespacedName{
				Name:      "foo",
				Namespace: "default",
			}
			created = &DbfsBlock{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "foo",
					Namespace: "default",
				}}

			By("creating an API obj")
			Expect(k8sClient.Create(context.TODO(), created)).To(Succeed())

			fetched = &DbfsBlock{}
			Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed())
			Expect(fetched).To(Equal(created))

			By("deleting the created object")
			Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed())
			Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed())
		})

		It("should correctly handle isSubmitted", func() {
			// FileInfo present but Path empty -> not submitted.
			dbfsBlock := &DbfsBlock{
				Status: &DbfsBlockStatus{
					FileInfo: &dbmodels.FileInfo{
						FileSize: 0,
					},
				},
			}
			Expect(dbfsBlock.IsSubmitted()).To(BeFalse())

			// Non-empty Path -> submitted.
			dbfsBlock2 := &DbfsBlock{
				Status: &DbfsBlockStatus{
					FileInfo: &dbmodels.FileInfo{
						Path: "/test-path",
					},
				},
			}
			Expect(dbfsBlock2.IsSubmitted()).To(BeTrue())

			// Nil FileInfo -> not submitted (no panic expected).
			dbfsBlock3 := &DbfsBlock{
				Status: &DbfsBlockStatus{
					FileInfo: nil,
				},
			}
			Expect(dbfsBlock3.IsSubmitted()).To(BeFalse())
		})

		It("should correctly handle finalizers", func() {
			// A set DeletionTimestamp marks the object as being deleted.
			dbfsBlock := &DbfsBlock{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &metav1.Time{
						Time: time.Now(),
					},
				},
			}
			Expect(dbfsBlock.IsBeingDeleted()).To(BeTrue())

			dbfsBlock.AddFinalizer(DbfsBlockFinalizerName)
			Expect(len(dbfsBlock.GetFinalizers())).To(Equal(1))
			Expect(dbfsBlock.HasFinalizer(DbfsBlockFinalizerName)).To(BeTrue())

			dbfsBlock.RemoveFinalizer(DbfsBlockFinalizerName)
			Expect(len(dbfsBlock.GetFinalizers())).To(Equal(0))
			Expect(dbfsBlock.HasFinalizer(DbfsBlockFinalizerName)).To(BeFalse())
		})

		It("should correctly handle file hash", func() {
			// "dGVzdA==" is base64 for "test"; the expected value below is
			// the SHA-1 of "test".
			dbfsBlock := &DbfsBlock{
				Spec: &DbfsBlockSpec{
					Data: "dGVzdA==",
				},
			}

			Expect(dbfsBlock.GetHash()).To(Equal("a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"))
			Expect(dbfsBlock.IsUpToDate()).To(BeFalse())

			dbfsBlock.Status = &DbfsBlockStatus{
				FileHash: "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
			}
			Expect(dbfsBlock.IsUpToDate()).To(BeTrue())

			// Invalid base64 yields an empty hash rather than an error.
			dbfsBlockError := &DbfsBlock{
				Spec: &DbfsBlockSpec{
					Data: "invalid_base64",
				},
			}
			Expect(dbfsBlockError.GetHash()).To(Equal(""))
		})
	})

})
81 changes: 81 additions & 0 deletions api/v1/dcluster_types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
/*
Copyright 2019 microsoft.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// DclusterStatus defines the observed state of Dcluster.
type DclusterStatus struct {
	// ClusterInfo holds the cluster state reported by Databricks; nil until
	// the cluster is submitted. NOTE(review): DclusterInfo is declared
	// elsewhere in this package.
	ClusterInfo *DclusterInfo `json:"cluster_info,omitempty"`
}

// +kubebuilder:object:root=true

// Dcluster is the Schema for the dclusters API
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:printcolumn:name="ClusterID",type="integer",JSONPath=".status.cluster_info.cluster_id"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.cluster_info.state"
// +kubebuilder:printcolumn:name="NumWorkers",type="integer",JSONPath=".status.cluster_info.num_workers"
type Dcluster struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec reuses the SDK's NewCluster request type directly.
	Spec *dbmodels.NewCluster `json:"spec,omitempty"`
	// Status may be nil; IsSubmitted guards against that.
	// NOTE(review): the ClusterID printcolumn above declares type "integer",
	// but IsSubmitted compares ClusterID to "" (a string) — confirm which is
	// correct.
	Status *DclusterStatus `json:"status,omitempty"`
}

// IsBeingDeleted reports whether the API server has marked this object for
// deletion by setting its deletion timestamp.
func (dcluster *Dcluster) IsBeingDeleted() bool {
	ts := dcluster.ObjectMeta.DeletionTimestamp
	return !ts.IsZero()
}

// IsSubmitted reports whether the cluster exists on the Databricks side,
// i.e. the status carries cluster info with a non-empty cluster ID.
func (dcluster *Dcluster) IsSubmitted() bool {
	status := dcluster.Status
	return status != nil &&
		status.ClusterInfo != nil &&
		status.ClusterInfo.ClusterID != ""
}

// DclusterFinalizerName is the finalizer key managed on Dcluster objects
// via HasFinalizer/AddFinalizer/RemoveFinalizer.
const DclusterFinalizerName = "dcluster.finalizers.databricks.microsoft.com"

// HasFinalizer reports whether finalizerName is already recorded on this
// Dcluster's metadata.
func (dcluster *Dcluster) HasFinalizer(finalizerName string) bool {
	finalizers := dcluster.ObjectMeta.Finalizers
	return containsString(finalizers, finalizerName)
}

// AddFinalizer appends finalizerName to this Dcluster's metadata.
func (dcluster *Dcluster) AddFinalizer(finalizerName string) {
	finalizers := append(dcluster.ObjectMeta.Finalizers, finalizerName)
	dcluster.ObjectMeta.Finalizers = finalizers
}

// RemoveFinalizer deletes finalizerName from this Dcluster's metadata.
func (dcluster *Dcluster) RemoveFinalizer(finalizerName string) {
	finalizers := removeString(dcluster.ObjectMeta.Finalizers, finalizerName)
	dcluster.ObjectMeta.Finalizers = finalizers
}

// +kubebuilder:object:root=true

// DclusterList contains a list of Dcluster
type DclusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of Dcluster objects in this page of results.
	Items []Dcluster `json:"items"`
}

func init() {
	// Register both the object and its list type with the scheme builder.
	SchemeBuilder.Register(&Dcluster{}, &DclusterList{})
}
Loading

0 comments on commit c273324

Please sign in to comment.