
Commit

Merge branch 'main' of github.com:dragonflyoss/Dragonfly2 into feature/features

Signed-off-by: Gaius <[email protected]>
gaius-qi committed Mar 30, 2023
2 parents bf32ad7 + 3fbeac2 commit 8b23d60
Showing 11 changed files with 69 additions and 42 deletions.
3 changes: 2 additions & 1 deletion client/daemon/peer/traffic_shaper.go
@@ -262,7 +262,8 @@ func (ts *samplingTrafficShaper) Record(taskID string, n int) {
ts.RLock()
if task, ok := ts.tasks[taskID]; ok {
task.lastSecondBandwidth.Add(int64(n))
ts.Debugf("the task %s is not found when record it", taskID)
} else {
ts.Warnf("the task %s is not found when record it", taskID)
}
ts.RUnlock()
}
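Because the rendered hunk above drops its +/- markers, here is a self-contained sketch of how the corrected Record path reads after this change. The trafficShaperTask and logger stand-ins are reduced to only what Record touches and are assumptions, not the real types from the package.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// trafficShaperTask keeps only the counter Record updates; the real task type carries more state.
type trafficShaperTask struct {
    lastSecondBandwidth atomic.Int64
}

// samplingTrafficShaper is reduced to a lock, a task table, and a warn-level logger stand-in.
type samplingTrafficShaper struct {
    sync.RWMutex
    tasks map[string]*trafficShaperTask
}

func (ts *samplingTrafficShaper) Warnf(format string, args ...any) {
    fmt.Printf("WARN: "+format+"\n", args...)
}

// Record mirrors the corrected logic: the "not found" message now lives in an
// else branch and is logged at warn level instead of debug.
func (ts *samplingTrafficShaper) Record(taskID string, n int) {
    ts.RLock()
    if task, ok := ts.tasks[taskID]; ok {
        task.lastSecondBandwidth.Add(int64(n))
    } else {
        ts.Warnf("the task %s is not found when record it", taskID)
    }
    ts.RUnlock()
}

func main() {
    ts := &samplingTrafficShaper{tasks: map[string]*trafficShaperTask{"task-1": {}}}
    ts.Record("task-1", 1024)
    ts.Record("missing", 512) // logs the warning added in this commit
    fmt.Println(ts.tasks["task-1"].lastSecondBandwidth.Load())
}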
5 changes: 2 additions & 3 deletions go.mod
@@ -3,7 +3,7 @@ module d7y.io/dragonfly/v2
go 1.20

require (
d7y.io/api v1.8.4
d7y.io/api v1.8.5
github.com/RichardKnop/machinery v1.10.6
github.com/Showmax/go-fqdn v1.0.0
github.com/VividCortex/mysqlerr v1.0.0
@@ -78,7 +78,7 @@ require (
golang.org/x/sys v0.6.0
golang.org/x/time v0.3.0
google.golang.org/api v0.114.0
google.golang.org/grpc v1.53.0
google.golang.org/grpc v1.55.0-dev
google.golang.org/protobuf v1.30.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
@@ -209,7 +209,6 @@ require (
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gorm.io/driver/sqlite v1.4.3 // indirect
gorm.io/driver/sqlserver v1.4.1 // indirect
gorm.io/plugin/dbresolver v1.3.0 // indirect
k8s.io/apimachinery v0.26.0 // indirect
14 changes: 6 additions & 8 deletions go.sum
@@ -51,8 +51,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
d7y.io/api v1.8.4 h1:2qlV2y4viuhb9FwjHFkBY+o2jUVmYE64McSoI+AAYUc=
d7y.io/api v1.8.4/go.mod h1:a68kyDomG6hyShQUFD0CorxmU3RB4/NHfUzww8S/VYg=
d7y.io/api v1.8.5 h1:8LnSRrXiEY6XODK+GTgkrgBZkmONhLcyfDlarWpDrfw=
d7y.io/api v1.8.5/go.mod h1:HIJMfhqiBHJ0yNVuOASQe6X0IVzOkxdLiWzcMM0xo2c=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
@@ -744,9 +744,8 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA=
github.com/mattn/go-sqlite3 v1.14.3/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -1649,8 +1648,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo=
google.golang.org/grpc v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1715,9 +1714,8 @@ gorm.io/driver/mysql v1.4.7/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8o
gorm.io/driver/postgres v1.2.2/go.mod h1:Ik3tK+a3FMp8ORZl29v4b3M0RsgXsaeMXh9s9eVMXco=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/driver/sqlite v1.1.3 h1:BYfdVuZB5He/u9dt4qDpZqiqDJ6KhPqs5QUqsr/Eeuc=
gorm.io/driver/sqlite v1.1.3/go.mod h1:AKDgRWk8lcSQSw+9kxCJnX/yySj8G3rdwYlU57cB45c=
gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU=
gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI=
gorm.io/driver/sqlserver v1.2.1/go.mod h1:nixq0OB3iLXZDiPv6JSOjWuPgpyaRpOIIevYtA4Ulb4=
gorm.io/driver/sqlserver v1.4.1 h1:t4r4r6Jam5E6ejqP7N82qAJIJAht27EGT41HyPfXRw0=
gorm.io/driver/sqlserver v1.4.1/go.mod h1:DJ4P+MeZbc5rvY58PnmN1Lnyvb5gw5NPzGshHDnJLig=
34 changes: 28 additions & 6 deletions manager/rpcserver/manager_server_v1.go
@@ -43,6 +43,7 @@ import (
"d7y.io/dragonfly/v2/manager/types"
pkgcache "d7y.io/dragonfly/v2/pkg/cache"
"d7y.io/dragonfly/v2/pkg/objectstorage"
"d7y.io/dragonfly/v2/pkg/structure"
)

// managerServerV1 is v1 version of the manager grpc server.
@@ -328,9 +329,10 @@ func (s *managerServerV1) GetScheduler(ctx context.Context, req *managerv1.GetSc
}
}

var features map[string]bool
for k, v := range scheduler.Features {
features[k] = v.(bool)
// Marshal features of scheduler.
features, err := scheduler.Features.MarshalJSON()
if err != nil {
return nil, status.Error(codes.DataLoss, err.Error())
}

// Construct scheduler.
@@ -400,26 +402,39 @@ func (s *managerServerV1) UpdateScheduler(ctx context.Context, req *managerv1.Up
log.Warn(err)
}

// Marshal features of scheduler.
features, err := scheduler.Features.MarshalJSON()
if err != nil {
return nil, status.Error(codes.DataLoss, err.Error())
}

return &managerv1.Scheduler{
Id: uint64(scheduler.ID),
Hostname: scheduler.Hostname,
Idc: scheduler.IDC,
Location: scheduler.Location,
Ip: scheduler.IP,
Port: scheduler.Port,
Features: features,
SchedulerClusterId: uint64(scheduler.SchedulerClusterID),
State: scheduler.State,
}, nil
}

// Create scheduler and associate cluster.
func (s *managerServerV1) createScheduler(ctx context.Context, req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) {
features, err := structure.StructToMap(types.DefaultSchedulerFeatures)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

scheduler := models.Scheduler{
Hostname: req.Hostname,
IDC: req.Idc,
Location: req.Location,
IP: req.Ip,
Port: req.Port,
Features: features,
SchedulerClusterID: uint(req.SchedulerClusterId),
}

@@ -471,10 +486,10 @@ func (s *managerServerV1) ListSchedulers(ctx context.Context, req *managerv1.Lis

// Cache miss.
var schedulerClusters []models.SchedulerCluster
if err := s.db.WithContext(ctx).Preload("SecurityGroup.SecurityRules").Preload("SeedPeerClusters.SeedPeers", "state = ?", "active").Preload("Schedulers", "state = ?", "active").Find(&schedulerClusters).Error; err != nil {
if err := s.db.WithContext(ctx).Preload("SecurityGroup.SecurityRules").Preload("SeedPeerClusters.SeedPeers", "state = ?", "active").
Preload("Schedulers", "state = ?", "active").Find(&schedulerClusters).Error; err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

log.Debugf("list scheduler clusters %v with hostInfo %#v", getSchedulerClusterNames(schedulerClusters), req.HostInfo)

// Search optimal scheduler clusters.
@@ -484,7 +499,7 @@ func (s *managerServerV1) ListSchedulers(ctx context.Context, req *managerv1.Lis
candidateSchedulerClusters []models.SchedulerCluster
err error
)
candidateSchedulerClusters, err = s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req.Hostname, req.Ip, req.HostInfo)
candidateSchedulerClusters, err = s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req.Hostname, req.Ip, req.HostInfo, logger.CoreLogger)
if err != nil {
log.Error(err)
metrics.SearchSchedulerClusterFailureCount.WithLabelValues(req.Version, req.Commit).Inc()
@@ -521,6 +536,12 @@ func (s *managerServerV1) ListSchedulers(ctx context.Context, req *managerv1.Lis
}
}

// Marshal features of scheduler.
features, err := scheduler.Features.MarshalJSON()
if err != nil {
return nil, status.Error(codes.DataLoss, err.Error())
}

pbListSchedulersResponse.Schedulers = append(pbListSchedulersResponse.Schedulers, &managerv1.Scheduler{
Id: uint64(scheduler.ID),
Hostname: scheduler.Hostname,
Expand All @@ -529,6 +550,7 @@ func (s *managerServerV1) ListSchedulers(ctx context.Context, req *managerv1.Lis
Ip: scheduler.IP,
Port: scheduler.Port,
State: scheduler.State,
Features: features,
SchedulerClusterId: uint64(scheduler.SchedulerClusterID),
SeedPeers: seedPeers,
})
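The helpers leaned on above (scheduler.Features.MarshalJSON and structure.StructToMap) are not themselves part of this diff. As a rough, self-contained analog of the two conversions, with encoding/json standing in and a made-up feature struct, since the real field set behind types.DefaultSchedulerFeatures is not shown here:

package main

import (
    "encoding/json"
    "fmt"
)

// schedulerFeatures is a hypothetical stand-in for the struct behind
// types.DefaultSchedulerFeatures; the real field names may differ.
type schedulerFeatures struct {
    Schedule bool `json:"schedule"`
    Preheat  bool `json:"preheat"`
}

// structToMap approximates what structure.StructToMap is used for here: convert a
// struct into a map[string]any before persisting it in the scheduler model's
// Features column.
func structToMap(v any) (map[string]any, error) {
    b, err := json.Marshal(v)
    if err != nil {
        return nil, err
    }
    m := map[string]any{}
    if err := json.Unmarshal(b, &m); err != nil {
        return nil, err
    }
    return m, nil
}

func main() {
    // Writing path (createScheduler): seed the new row with default features as a map.
    features, err := structToMap(schedulerFeatures{Schedule: true, Preheat: true})
    if err != nil {
        panic(err)
    }

    // Reading path (GetScheduler, UpdateScheduler, ListSchedulers): marshal the stored
    // map back to JSON bytes, the same shape as the []byte that Features.MarshalJSON()
    // returns before being assigned to the response's Features field in the hunks above.
    b, err := json.Marshal(features)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}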
2 changes: 1 addition & 1 deletion manager/rpcserver/manager_server_v2.go
@@ -477,7 +477,7 @@ func (s *managerServerV2) ListSchedulers(ctx context.Context, req *managerv2.Lis
candidateSchedulerClusters []models.SchedulerCluster
err error
)
candidateSchedulerClusters, err = s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req.Hostname, req.Ip, req.HostInfo)
candidateSchedulerClusters, err = s.searcher.FindSchedulerClusters(ctx, schedulerClusters, req.Hostname, req.Ip, req.HostInfo, logger.CoreLogger)
if err != nil {
log.Error(err)
metrics.SearchSchedulerClusterFailureCount.WithLabelValues(req.Version, req.Commit).Inc()
9 changes: 5 additions & 4 deletions manager/searcher/mocks/searcher_mock.go

Some generated files are not rendered by default.

31 changes: 17 additions & 14 deletions manager/searcher/searcher.go
@@ -28,6 +28,7 @@ import (

"github.com/mitchellh/mapstructure"
"github.com/yl2chen/cidranger"
"go.uber.org/zap"

logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/manager/models"
@@ -85,12 +86,11 @@ type Scopes struct {

type Searcher interface {
// FindSchedulerClusters finds scheduler clusters that best matches the evaluation.
FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, ip, hostname string, conditions map[string]string) ([]models.SchedulerCluster, error)
FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, ip, hostname string,
conditions map[string]string, log *zap.SugaredLogger) ([]models.SchedulerCluster, error)
}

type searcher struct {
cidrs []string
}
type searcher struct{}

func New(pluginDir string) Searcher {
s, err := LoadPlugin(pluginDir)
@@ -104,7 +104,10 @@ func New(pluginDir string) Searcher {
}

// FindSchedulerClusters finds scheduler clusters that best matches the evaluation.
func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, ip, hostname string, conditions map[string]string) ([]models.SchedulerCluster, error) {
func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, ip, hostname string,
conditions map[string]string, log *zap.SugaredLogger) ([]models.SchedulerCluster, error) {
log = log.With("ip", ip, "hostname", hostname, "conditions", conditions)

if len(schedulerClusters) <= 0 {
return nil, errors.New("empty scheduler clusters")
}
@@ -119,16 +122,16 @@ func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters
func(i, j int) bool {
var si, sj Scopes
if err := mapstructure.Decode(clusters[i].Scopes, &si); err != nil {
logger.Errorf("cluster %s decode scopes failed: %v", clusters[i].Name, err)
log.Errorf("cluster %s decode scopes failed: %v", clusters[i].Name, err)
return false
}

if err := mapstructure.Decode(clusters[j].Scopes, &sj); err != nil {
logger.Errorf("cluster %s decode scopes failed: %v", clusters[i].Name, err)
log.Errorf("cluster %s decode scopes failed: %v", clusters[i].Name, err)
return false
}

return Evaluate(ip, hostname, conditions, si, clusters[i]) > Evaluate(ip, hostname, conditions, sj, clusters[j])
return Evaluate(ip, hostname, conditions, si, clusters[i], log) > Evaluate(ip, hostname, conditions, sj, clusters[j], log)
},
)

@@ -177,9 +180,9 @@ func FilterSchedulerClusters(conditions map[string]string, schedulerClusters []m
}

// Evaluate the degree of matching between scheduler cluster and dfdaemon.
func Evaluate(ip, hostname string, conditions map[string]string, scopes Scopes, cluster models.SchedulerCluster) float64 {
func Evaluate(ip, hostname string, conditions map[string]string, scopes Scopes, cluster models.SchedulerCluster, log *zap.SugaredLogger) float64 {
return securityDomainAffinityWeight*calculateSecurityDomainAffinityScore(conditions[ConditionSecurityDomain], cluster.SecurityGroup.SecurityRules) +
cidrAffinityWeight*calculateCIDRAffinityScore(ip, scopes.CIDRs) +
cidrAffinityWeight*calculateCIDRAffinityScore(ip, scopes.CIDRs, log) +
idcAffinityWeight*calculateIDCAffinityScore(conditions[ConditionIDC], scopes.IDC) +
locationAffinityWeight*calculateMultiElementAffinityScore(conditions[ConditionLocation], scopes.Location) +
clusterTypeWeight*calculateClusterTypeScore(cluster)
@@ -199,26 +202,26 @@ func calculateSecurityDomainAffinityScore(securityDomain string, securityRules [
}

// calculateCIDRAffinityScore 0.0~1.0 larger and better.
func calculateCIDRAffinityScore(ip string, cidrs []string) float64 {
func calculateCIDRAffinityScore(ip string, cidrs []string, log *zap.SugaredLogger) float64 {
// Construct CIDR ranger.
ranger := cidranger.NewPCTrieRanger()
for _, cidr := range cidrs {
_, network, err := net.ParseCIDR(cidr)
if err != nil {
logger.Error(err)
log.Error(err)
continue
}

if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
logger.Error(err)
log.Error(err)
continue
}
}

// Determine whether an IP is contained in the constructed networks ranger.
contains, err := ranger.Contains(net.ParseIP(ip))
if err != nil {
logger.Error(err)
log.Error(err)
return minScore
}

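The tail of calculateCIDRAffinityScore is collapsed in the hunk above; a self-contained sketch of the same containment test follows. The score constants and the assumption that a matching IP returns the maximum score are illustrative, not taken from this diff.

package main

import (
    "fmt"
    "net"

    "github.com/yl2chen/cidranger"
    "go.uber.org/zap"
)

const (
    minScore = 0.0 // illustrative values; the real constants live elsewhere in the package
    maxScore = 1.0
)

// cidrAffinity mirrors the shape of calculateCIDRAffinityScore: build a prefix-trie
// ranger from the cluster's CIDR scopes and score the dfdaemon IP by containment,
// logging (and skipping) any malformed CIDR with the request-scoped sugared logger.
func cidrAffinity(ip string, cidrs []string, log *zap.SugaredLogger) float64 {
    ranger := cidranger.NewPCTrieRanger()
    for _, cidr := range cidrs {
        _, network, err := net.ParseCIDR(cidr)
        if err != nil {
            log.Error(err)
            continue
        }

        if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
            log.Error(err)
            continue
        }
    }

    contains, err := ranger.Contains(net.ParseIP(ip))
    if err != nil {
        log.Error(err)
        return minScore
    }

    if contains {
        return maxScore
    }
    return minScore
}

func main() {
    log := zap.NewExample().Sugar()
    fmt.Println(cidrAffinity("10.0.1.7", []string{"10.0.0.0/16"}, log))  // inside the scope: 1
    fmt.Println(cidrAffinity("192.0.2.1", []string{"10.0.0.0/16"}, log)) // outside the scope: 0
}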
3 changes: 2 additions & 1 deletion manager/searcher/searcher_test.go
@@ -22,6 +22,7 @@ import (

"github.com/stretchr/testify/assert"

logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/manager/models"
)

@@ -751,7 +752,7 @@ func TestSearcher_FindSchedulerClusters(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
searcher := New(pluginDir)
clusters, found := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, "128.168.1.0", "foo", tc.conditions)
clusters, found := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, "128.168.1.0", "foo", tc.conditions, logger.CoreLogger)
tc.expect(t, clusters, found)
})
}
3 changes: 2 additions & 1 deletion manager/searcher/testdata/main.go
@@ -21,6 +21,7 @@ import (
"fmt"
"os"

logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/manager/models"
"d7y.io/dragonfly/v2/manager/searcher"
)
@@ -32,7 +33,7 @@ func main() {
os.Exit(1)
}

clusters, err := s.FindSchedulerClusters(context.Background(), []models.SchedulerCluster{}, "127.0.0.1", "foo", map[string]string{})
clusters, err := s.FindSchedulerClusters(context.Background(), []models.SchedulerCluster{}, "127.0.0.1", "foo", map[string]string{}, logger.CoreLogger)
if err != nil {
fmt.Println("scheduler cluster not found")
os.Exit(1)
4 changes: 3 additions & 1 deletion manager/searcher/testdata/plugin/searcher.go
@@ -20,11 +20,13 @@ import (
"context"

"d7y.io/dragonfly/v2/manager/models"
"go.uber.org/zap"
)

type searcher struct{}

func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, hostname, ip string, conditions map[string]string) ([]models.SchedulerCluster, error) {
func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []models.SchedulerCluster, hostname, ip string,
conditions map[string]string, log *zap.SugaredLogger) ([]models.SchedulerCluster, error) {
return []models.SchedulerCluster{{Name: "foo"}}, nil
}

3 changes: 1 addition & 2 deletions pkg/log/log.go
@@ -19,7 +19,7 @@ package log
import (
"go.uber.org/zap/zapcore"

"d7y.io/dragonfly/v2/internal/dflog"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/dfpath"
)

@@ -35,7 +35,6 @@ func SetGrpcLevel(level zapcore.Level) {

// SetupDaemon sets daemon log config: path, console
func SetupDaemon(logDir string, verbose bool, console bool) error {

var options []dfpath.Option
if logDir != "" {
options = append(options, dfpath.WithLogDir(logDir))
