diff --git a/.golangci.yml b/.golangci.yml index 261d25587d3..b6d2cfe15fd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -69,7 +69,20 @@ issues: linters: - gomnd - dupl - path: v1beta1 + - path: pkg/apis/scheduling/v1beta1/conversion.go + linters: + - golint + - path: clientset + linters: + - godot + - path: informers + linters: + - godot + - path: v1alpha1 + linters: + - godot + - path: generated linters: - golint - deadcode @@ -77,3 +90,4 @@ issues: linters: - golint - deadcode + - godot diff --git a/cmd/cli/util/util.go b/cmd/cli/util/util.go index 7b464701e92..a1058841d9c 100644 --- a/cmd/cli/util/util.go +++ b/cmd/cli/util/util.go @@ -23,7 +23,7 @@ import ( "github.com/spf13/cobra" ) -// CheckError prints the error of commands +// CheckError prints the error of commands. func CheckError(cmd *cobra.Command, err error) { if err != nil { msg := "Failed to" diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index f8612a130a1..fde6485a692 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -54,7 +54,7 @@ func NewServerOption() *ServerOption { return &s } -// AddFlags adds flags for a specific CMServer to the specified FlagSet +// AddFlags adds flags for a specific CMServer to the specified FlagSet. func (s *ServerOption) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.KubeClientOptions.Master, "master", s.KubeClientOptions.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.StringVar(&s.KubeClientOptions.KubeConfig, "kubeconfig", s.KubeClientOptions.KubeConfig, "Path to kubeconfig file with authorization and master location information.") @@ -69,7 +69,7 @@ func (s *ServerOption) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.SchedulerName, "scheduler-name", defaultSchedulerName, "Volcano will handle pods whose .spec.SchedulerName is same as scheduler-name") } -// CheckOptionOrDie checks the LockObjectNamespace +// CheckOptionOrDie checks the LockObjectNamespace. func (s *ServerOption) CheckOptionOrDie() error { if s.EnableLeaderElection && s.LockObjectNamespace == "" { return fmt.Errorf("lock-object-namespace must not be nil when LeaderElection is enabled") diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index eab79f4f8c3..1f59aedb66c 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -50,7 +50,7 @@ const ( retryPeriod = 5 * time.Second ) -// Run the controller +// Run the controller. func Run(opt *options.ServerOption) error { config, err := kube.BuildConfig(opt.KubeClientOptions) if err != nil { diff --git a/cmd/scheduler/app/options/options.go b/cmd/scheduler/app/options/options.go index e6df9a36099..9030c2e2fb8 100644 --- a/cmd/scheduler/app/options/options.go +++ b/cmd/scheduler/app/options/options.go @@ -62,7 +62,7 @@ type ServerOption struct { PercentageOfNodesToFind int32 } -// ServerOpts server options +// ServerOpts server options. var ServerOpts *ServerOption // NewServerOption creates a new CMServer with a default config. @@ -73,7 +73,7 @@ func NewServerOption() *ServerOption { return &s } -// AddFlags adds flags for a specific CMServer to the specified FlagSet +// AddFlags adds flags for a specific CMServer to the specified FlagSet. 
func (s *ServerOption) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.KubeClientOptions.Master, "master", s.KubeClientOptions.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.StringVar(&s.KubeClientOptions.KubeConfig, "kubeconfig", s.KubeClientOptions.KubeConfig, "Path to kubeconfig file with authorization and master location information") @@ -103,7 +103,7 @@ func (s *ServerOption) AddFlags(fs *pflag.FlagSet) { fs.Int32Var(&s.PercentageOfNodesToFind, "percentage-nodes-to-find", defaultPercentageOfNodesToFind, "The percentage of nodes to find and score, if <=0 will be calcuated based on the cluster size") } -// CheckOptionOrDie check lock-object-namespace when LeaderElection is enabled +// CheckOptionOrDie checks lock-object-namespace when LeaderElection is enabled. func (s *ServerOption) CheckOptionOrDie() error { if s.EnableLeaderElection && s.LockObjectNamespace == "" { return fmt.Errorf("lock-object-namespace must not be nil when LeaderElection is enabled") @@ -112,7 +112,7 @@ func (s *ServerOption) CheckOptionOrDie() error { return nil } -// RegisterOptions registers options +// RegisterOptions registers options. func (s *ServerOption) RegisterOptions() { ServerOpts = s } diff --git a/cmd/scheduler/app/server.go b/cmd/scheduler/app/server.go index a78fc5f05ce..49a41a7c6a3 100644 --- a/cmd/scheduler/app/server.go +++ b/cmd/scheduler/app/server.go @@ -52,7 +52,7 @@ const ( retryPeriod = 5 * time.Second ) -// Run the volcano scheduler +// Run the volcano scheduler. func Run(opt *options.ServerOption) error { if opt.PrintVersion { version.PrintVersionAndExit() diff --git a/cmd/webhook-manager/app/options/options.go b/cmd/webhook-manager/app/options/options.go index 2bc9b2fe9d3..913bef94eb8 100644 --- a/cmd/webhook-manager/app/options/options.go +++ b/cmd/webhook-manager/app/options/options.go @@ -44,13 +44,13 @@ type Config struct { WebhookURL string } -// NewConfig create new config +// NewConfig creates a new config. func NewConfig() *Config { c := Config{} return &c } -// AddFlags add flags +// AddFlags adds flags. func (c *Config) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&c.KubeClientOptions.Master, "master", c.KubeClientOptions.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.StringVar(&c.KubeClientOptions.KubeConfig, "kubeconfig", c.KubeClientOptions.KubeConfig, "Path to kubeconfig file with authorization and master location information.") @@ -71,7 +71,7 @@ func (c *Config) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&c.SchedulerName, "scheduler-name", defaultSchedulerName, "Volcano will handle pods whose .spec.SchedulerName is same as scheduler-name") } -// CheckPortOrDie check valid port range +// CheckPortOrDie checks for a valid port range. func (c *Config) CheckPortOrDie() error { if c.Port < 1 || c.Port > 65535 { return fmt.Errorf("the port should be in the range of 1 and 65535") diff --git a/cmd/webhook-manager/app/util.go b/cmd/webhook-manager/app/util.go index d391bf1d257..0106c1578b4 100644 --- a/cmd/webhook-manager/app/util.go +++ b/cmd/webhook-manager/app/util.go @@ -90,7 +90,7 @@ func getKubeClient(restConfig *rest.Config) *kubernetes.Clientset { return clientset } -// GetVolcanoClient get a clientset for volcano +// getVolcanoClient gets a clientset for volcano. 
func getVolcanoClient(restConfig *rest.Config) *versioned.Clientset { clientset, err := versioned.NewForConfig(restConfig) if err != nil { @@ -101,7 +101,7 @@ func getVolcanoClient(restConfig *rest.Config) *versioned.Clientset { // configTLS is a helper function that generate tls certificates from directly defined tls config or kubeconfig // These are passed in as command line for cluster certification. If tls config is passed in, we use the directly -// defined tls config, else use that defined in kubeconfig +// defined tls config, else use that defined in kubeconfig. func configTLS(config *options.Config, restConfig *rest.Config) *tls.Config { if len(config.CertFile) != 0 && len(config.KeyFile) != 0 { sCert, err := tls.LoadX509KeyPair(config.CertFile, config.KeyFile) diff --git a/pkg/apis/batch/v1alpha1/job.go b/pkg/apis/batch/v1alpha1/job.go index e8e95ba990f..29ee2358570 100644 --- a/pkg/apis/batch/v1alpha1/job.go +++ b/pkg/apis/batch/v1alpha1/job.go @@ -25,7 +25,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Job defines the volcano job +// Job defines the volcano job. type Job struct { metav1.TypeMeta `json:",inline"` @@ -42,7 +42,7 @@ type Job struct { Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } -// JobSpec describes how the job execution will look like and when it will actually run +// JobSpec describes how the job execution will look like and when it will actually run. type JobSpec struct { // SchedulerName is the default value of `tasks.template.spec.schedulerName`. // +optional @@ -92,7 +92,7 @@ type JobSpec struct { PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,10,opt,name=priorityClassName"` } -// VolumeSpec defines the specification of Volume, e.g. PVC +// VolumeSpec defines the specification of Volume, e.g. PVC. type VolumeSpec struct { // Path within the container at which the volume should be mounted. Must // not contain ':'. @@ -107,7 +107,7 @@ type VolumeSpec struct { VolumeClaim *v1.PersistentVolumeClaimSpec `json:"volumeClaim,omitempty" protobuf:"bytes,3,opt,name=volumeClaim"` } -// JobEvent job event +// JobEvent job event. type JobEvent string const ( @@ -155,7 +155,7 @@ type LifecyclePolicy struct { Timeout *metav1.Duration `json:"timeout,omitempty" protobuf:"bytes,4,opt,name=timeout"` } -// TaskSpec specifies the task specification of Job +// TaskSpec specifies the task specification of Job. type TaskSpec struct { // Name specifies the name of tasks // +optional @@ -175,7 +175,7 @@ type TaskSpec struct { Policies []LifecyclePolicy `json:"policies,omitempty" protobuf:"bytes,4,opt,name=policies"` } -// JobPhase defines the phase of the job +// JobPhase defines the phase of the job. type JobPhase string const ( @@ -220,7 +220,7 @@ type JobState struct { LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` } -// JobStatus represents the current status of a Job +// JobStatus represents the current status of a Job. type JobStatus struct { // Current state of Job. // +optional @@ -269,7 +269,7 @@ type JobStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// JobList defines the list of jobs +// JobList defines the list of jobs. 
type JobList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/pkg/apis/bus/v1alpha1/commands.go b/pkg/apis/bus/v1alpha1/commands.go index d7c15434927..12c73f4d8b4 100644 --- a/pkg/apis/bus/v1alpha1/commands.go +++ b/pkg/apis/bus/v1alpha1/commands.go @@ -7,7 +7,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Command defines command structure +// Command defines command structure. type Command struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -29,7 +29,7 @@ type Command struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// CommandList defines list of commands +// CommandList defines list of commands. type CommandList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/pkg/apis/helpers/helpers.go b/pkg/apis/helpers/helpers.go index 1825178f1d1..faa62f49f03 100644 --- a/pkg/apis/helpers/helpers.go +++ b/pkg/apis/helpers/helpers.go @@ -41,18 +41,20 @@ import ( schedulerv1beta1 "volcano.sh/volcano/pkg/apis/scheduling/v1beta1" ) -// JobKind creates job GroupVersionKind +// JobKind creates job GroupVersionKind. var JobKind = vcbatch.SchemeGroupVersion.WithKind("Job") -// CommandKind creates command GroupVersionKind +// CommandKind creates command GroupVersionKind. var CommandKind = vcbus.SchemeGroupVersion.WithKind("Command") -// V1beta1QueueKind is queue kind with v1alpha2 version +// V1beta1QueueKind is queue kind with v1beta1 version. var V1beta1QueueKind = schedulerv1beta1.SchemeGroupVersion.WithKind("Queue") -// CreateOrUpdateConfigMap : -// 1. creates config map resource if not present -// 2. updates config map is necessary +/* +CreateOrUpdateConfigMap : +1. creates config map resource if not present +2. updates config map if necessary. +*/ func CreateOrUpdateConfigMap(job *vcbatch.Job, kubeClients kubernetes.Interface, data map[string]string, cmName string) error { // If ConfigMap does not exist, create one for Job. cmOld, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}) @@ -97,7 +99,7 @@ func CreateOrUpdateConfigMap(job *vcbatch.Job, kubeClients kubernetes.Interface, return nil } -// CreateSecret create secret +// CreateSecret creates a secret. func CreateSecret(job *vcbatch.Job, kubeClients kubernetes.Interface, data map[string][]byte, secretName string) error { secret := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -118,7 +120,7 @@ func CreateSecret(job *vcbatch.Job, kubeClients kubernetes.Interface, data map[s return err } -// DeleteConfigmap deletes the config map resource +// DeleteConfigmap deletes the config map resource. func DeleteConfigmap(job *vcbatch.Job, kubeClients kubernetes.Interface, cmName string) error { if _, err := kubeClients.CoreV1().ConfigMaps(job.Namespace).Get(cmName, metav1.GetOptions{}); err != nil { if !apierrors.IsNotFound(err) { @@ -141,7 +143,7 @@ func DeleteConfigmap(job *vcbatch.Job, kubeClients kubernetes.Interface, cmName return nil } -// DeleteSecret delete secret +// DeleteSecret deletes a secret. 
func DeleteSecret(job *vcbatch.Job, kubeClients kubernetes.Interface, secretName string) error { err := kubeClients.CoreV1().Secrets(job.Namespace).Delete(secretName, nil) if err != nil && true == apierrors.IsNotFound(err) { @@ -151,7 +153,7 @@ func DeleteSecret(job *vcbatch.Job, kubeClients kubernetes.Interface, secretName return err } -// GeneratePodgroupName generate podgroup name of normal pod +// GeneratePodgroupName generate podgroup name of normal pod. func GeneratePodgroupName(pod *v1.Pod) string { pgName := vcbatch.PodgroupNamePrefix @@ -169,7 +171,7 @@ func GeneratePodgroupName(pod *v1.Pod) string { return pgName } -// StartHealthz register healthz interface +// StartHealthz register healthz interface. func StartHealthz(healthzBindAddress, name string) error { listener, err := net.Listen("tcp", healthzBindAddress) if err != nil { diff --git a/pkg/apis/scheduling/scheme/scheme.go b/pkg/apis/scheduling/scheme/scheme.go index 533f6155992..0b037c15446 100644 --- a/pkg/apis/scheduling/scheme/scheme.go +++ b/pkg/apis/scheduling/scheme/scheme.go @@ -35,7 +35,7 @@ func init() { Install(Scheme) } -// Install registers the API group and adds types to a scheme +// Install registers the API group and adds types to a scheme. func Install(scheme *runtime.Scheme) { v1beta1.AddToScheme(scheme) scheduling.AddToScheme(scheme) diff --git a/pkg/apis/scheduling/types.go b/pkg/apis/scheduling/types.go index ec74fe6b7d9..b38f733375e 100644 --- a/pkg/apis/scheduling/types.go +++ b/pkg/apis/scheduling/types.go @@ -24,7 +24,7 @@ import ( // PodGroupPhase is the phase of a pod group at the current time. type PodGroupPhase string -// QueueState is state type of queue +// QueueState is state type of queue. type QueueState string const ( @@ -113,7 +113,7 @@ const ( NotEnoughPodsReason string = "NotEnoughTasks" ) -// QueueEvent represent the phase of queue +// QueueEvent represent the phase of queue. type QueueEvent string const ( diff --git a/pkg/apis/scheduling/v1beta1/types.go b/pkg/apis/scheduling/v1beta1/types.go index 9cb0def4df2..78ff2a09dd4 100644 --- a/pkg/apis/scheduling/v1beta1/types.go +++ b/pkg/apis/scheduling/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // PodGroupPhase is the phase of a pod group at the current time. type PodGroupPhase string -// QueueState is state type of queue +// QueueState is state type of queue. type QueueState string const ( @@ -113,7 +113,7 @@ const ( NotEnoughPodsReason string = "NotEnoughTasks" ) -// QueueEvent represent the phase of queue +// QueueEvent represent the phase of queue. type QueueEvent string const ( diff --git a/pkg/cli/job/delete.go b/pkg/cli/job/delete.go index 99d06d9ab74..f51549c2247 100644 --- a/pkg/cli/job/delete.go +++ b/pkg/cli/job/delete.go @@ -36,7 +36,7 @@ type deleteFlags struct { var deleteJobFlags = &deleteFlags{} -// InitDeleteFlags init the delete command flags +// InitDeleteFlags init the delete command flags. func InitDeleteFlags(cmd *cobra.Command) { initFlags(cmd, &deleteJobFlags.commonFlags) @@ -44,7 +44,7 @@ func InitDeleteFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&deleteJobFlags.JobName, "name", "N", "", "the name of job") } -// DeleteJob delete the job +// DeleteJob delete the job. 
func DeleteJob() error { config, err := util.BuildConfig(deleteJobFlags.Master, deleteJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/job/list.go b/pkg/cli/job/list.go index d8e2635fc38..a5ce04b0dae 100644 --- a/pkg/cli/job/list.go +++ b/pkg/cli/job/list.go @@ -78,7 +78,7 @@ const ( var listJobFlags = &listFlags{} -// InitListFlags init list command flags +// InitListFlags init list command flags. func InitListFlags(cmd *cobra.Command) { initFlags(cmd, &listJobFlags.commonFlags) @@ -88,7 +88,7 @@ func InitListFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&listJobFlags.selector, "selector", "", "", "fuzzy matching jobName") } -// ListJobs lists all jobs details +// ListJobs lists all jobs details. func ListJobs() error { config, err := util.BuildConfig(listJobFlags.Master, listJobFlags.Kubeconfig) if err != nil { @@ -112,7 +112,7 @@ func ListJobs() error { return nil } -// PrintJobs prints all jobs details +// PrintJobs prints all jobs details. func PrintJobs(jobs *v1alpha1.JobList, writer io.Writer) { maxLenInfo := getMaxLen(jobs) diff --git a/pkg/cli/job/resume.go b/pkg/cli/job/resume.go index 3230dc8c709..9fb8c6fba8c 100644 --- a/pkg/cli/job/resume.go +++ b/pkg/cli/job/resume.go @@ -34,7 +34,7 @@ type resumeFlags struct { var resumeJobFlags = &resumeFlags{} -// InitResumeFlags init resume command flags +// InitResumeFlags init resume command flags. func InitResumeFlags(cmd *cobra.Command) { initFlags(cmd, &resumeJobFlags.commonFlags) @@ -42,7 +42,7 @@ func InitResumeFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&resumeJobFlags.JobName, "name", "N", "", "the name of job") } -// ResumeJob resumes the job +// ResumeJob resumes the job. func ResumeJob() error { config, err := util.BuildConfig(resumeJobFlags.Master, resumeJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/job/run.go b/pkg/cli/job/run.go index b579844fffe..c0cfbce9b1e 100644 --- a/pkg/cli/job/run.go +++ b/pkg/cli/job/run.go @@ -49,7 +49,7 @@ type runFlags struct { var launchJobFlags = &runFlags{} -// InitRunFlags init the run flags +// InitRunFlags init the run flags. func InitRunFlags(cmd *cobra.Command) { initFlags(cmd, &launchJobFlags.commonFlags) @@ -66,7 +66,7 @@ func InitRunFlags(cmd *cobra.Command) { var jobName = "job.volcano.sh" -// RunJob creates the job +// RunJob creates the job. func RunJob() error { config, err := util.BuildConfig(launchJobFlags.Master, launchJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/job/suspend.go b/pkg/cli/job/suspend.go index aa6e76d4b91..8dc7c90987b 100644 --- a/pkg/cli/job/suspend.go +++ b/pkg/cli/job/suspend.go @@ -34,7 +34,7 @@ type suspendFlags struct { var suspendJobFlags = &suspendFlags{} -// InitSuspendFlags init suspend related flags +// InitSuspendFlags init suspend related flags. func InitSuspendFlags(cmd *cobra.Command) { initFlags(cmd, &suspendJobFlags.commonFlags) @@ -42,7 +42,7 @@ func InitSuspendFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&suspendJobFlags.JobName, "name", "N", "", "the name of job") } -// SuspendJob suspends the job +// SuspendJob suspends the job. 
func SuspendJob() error { config, err := util.BuildConfig(suspendJobFlags.Master, suspendJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/job/util.go b/pkg/cli/job/util.go index 345b999c746..202bbdaf4f7 100644 --- a/pkg/cli/job/util.go +++ b/pkg/cli/job/util.go @@ -100,7 +100,7 @@ func translateTimestampSince(timestamp metav1.Time) string { return HumanDuration(time.Since(timestamp.Time)) } -// HumanDuration translate time.Duration to human readable time string +// HumanDuration translate time.Duration to human readable time string. func HumanDuration(d time.Duration) string { // Allow deviation no more than 2 seconds(excluded) to tolerate machine time // inconsistence, it can be considered as almost now. diff --git a/pkg/cli/job/view.go b/pkg/cli/job/view.go index 362ce1fe5bb..de86bb8eb34 100644 --- a/pkg/cli/job/view.go +++ b/pkg/cli/job/view.go @@ -42,7 +42,7 @@ type viewFlags struct { JobName string } -// level of print indent +// level of print indent. const ( Level0 = iota Level1 @@ -51,7 +51,7 @@ const ( var viewJobFlags = &viewFlags{} -// InitViewFlags init the view command flags +// InitViewFlags init the view command flags. func InitViewFlags(cmd *cobra.Command) { initFlags(cmd, &viewJobFlags.commonFlags) @@ -59,7 +59,7 @@ func InitViewFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&viewJobFlags.JobName, "name", "N", "", "the name of job") } -// ViewJob gives full details of the job +// ViewJob gives full details of the job. func ViewJob() error { config, err := util.BuildConfig(viewJobFlags.Master, viewJobFlags.Kubeconfig) if err != nil { @@ -84,7 +84,7 @@ func ViewJob() error { return nil } -// PrintJobInfo print the job detailed info into writer +// PrintJobInfo print the job detailed info into writer. func PrintJobInfo(job *v1alpha1.Job, writer io.Writer) { WriteLine(writer, Level0, "Name: \t%s\n", job.Name) WriteLine(writer, Level0, "Namespace: \t%s\n", job.Namespace) @@ -198,7 +198,7 @@ func PrintJobInfo(job *v1alpha1.Job, writer io.Writer) { } } -// PrintEvents print event info to writer +// PrintEvents print event info to writer. func PrintEvents(events []coreV1.Event, writer io.Writer) { if len(events) > 0 { WriteLine(writer, Level0, "%s:\n%-15s\t%-40s\t%-30s\t%-40s\t%s\n", "Events", "Type", "Reason", "Age", "Form", "Message") @@ -228,7 +228,7 @@ func PrintEvents(events []coreV1.Event, writer io.Writer) { } -// GetEvents get the job event by config +// GetEvents get the job event by config. func GetEvents(config *rest.Config, job *v1alpha1.Job) []coreV1.Event { kubernetes, err := kubernetes.NewForConfig(config) if err != nil { @@ -245,7 +245,7 @@ func GetEvents(config *rest.Config, job *v1alpha1.Job) []coreV1.Event { return jobEvents } -// WriteLine write lines with specified indent +// WriteLine write lines with specified indent. func WriteLine(writer io.Writer, spaces int, content string, params ...interface{}) { prefix := "" for i := 0; i < spaces; i++ { diff --git a/pkg/cli/queue/create.go b/pkg/cli/queue/create.go index 2ff74a17c39..c2babd2248e 100644 --- a/pkg/cli/queue/create.go +++ b/pkg/cli/queue/create.go @@ -36,7 +36,7 @@ type createFlags struct { var createQueueFlags = &createFlags{} -// InitCreateFlags is used to init all flags during queue creating +// InitCreateFlags is used to init all flags during queue creating. 
func InitCreateFlags(cmd *cobra.Command) { initFlags(cmd, &createQueueFlags.commonFlags) @@ -46,7 +46,7 @@ func InitCreateFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&createQueueFlags.State, "state", "S", "Open", "the state of queue") } -// CreateQueue create queue +// CreateQueue create queue. func CreateQueue() error { config, err := buildConfig(createQueueFlags.Master, createQueueFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/queue/delete.go b/pkg/cli/queue/delete.go index 7c7a52cb193..36af775162f 100644 --- a/pkg/cli/queue/delete.go +++ b/pkg/cli/queue/delete.go @@ -35,14 +35,14 @@ type deleteFlags struct { var deleteQueueFlags = &deleteFlags{} -// InitDeleteFlags is used to init all flags during queue deleting +// InitDeleteFlags is used to init all flags during queue deleting. func InitDeleteFlags(cmd *cobra.Command) { initFlags(cmd, &deleteQueueFlags.commonFlags) cmd.Flags().StringVarP(&deleteQueueFlags.Name, "name", "n", "", "the name of queue") } -// DeleteQueue delete queue +// DeleteQueue delete queue. func DeleteQueue() error { config, err := buildConfig(deleteQueueFlags.Master, deleteQueueFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/queue/get.go b/pkg/cli/queue/get.go index 011b3df04f9..486c1d5c302 100644 --- a/pkg/cli/queue/get.go +++ b/pkg/cli/queue/get.go @@ -37,7 +37,7 @@ type getFlags struct { var getQueueFlags = &getFlags{} -// InitGetFlags is used to init all flags +// InitGetFlags is used to init all flags. func InitGetFlags(cmd *cobra.Command) { initFlags(cmd, &getQueueFlags.commonFlags) @@ -45,7 +45,7 @@ func InitGetFlags(cmd *cobra.Command) { } -// GetQueue gets a queue +// GetQueue gets a queue. func GetQueue() error { config, err := buildConfig(getQueueFlags.Master, getQueueFlags.Kubeconfig) if err != nil { @@ -68,7 +68,7 @@ func GetQueue() error { return nil } -// PrintQueue prints queue information +// PrintQueue prints queue information. func PrintQueue(queue *v1beta1.Queue, writer io.Writer) { _, err := fmt.Fprintf(writer, "%-25s%-8s%-8s%-8s%-8s%-8s%-8s\n", Name, Weight, State, Inqueue, Pending, Running, Unknown) diff --git a/pkg/cli/queue/list.go b/pkg/cli/queue/list.go index 8807cad00a9..a212d651639 100644 --- a/pkg/cli/queue/list.go +++ b/pkg/cli/queue/list.go @@ -58,12 +58,12 @@ const ( var listQueueFlags = &listFlags{} -// InitListFlags inits all flags +// InitListFlags inits all flags. func InitListFlags(cmd *cobra.Command) { initFlags(cmd, &listQueueFlags.commonFlags) } -// ListQueue lists all the queue +// ListQueue lists all the queue. func ListQueue() error { config, err := buildConfig(listQueueFlags.Master, listQueueFlags.Kubeconfig) if err != nil { @@ -85,7 +85,7 @@ func ListQueue() error { return nil } -// PrintQueues prints queue information +// PrintQueues prints queue information. 
func PrintQueues(queues *v1beta1.QueueList, writer io.Writer) { _, err := fmt.Fprintf(writer, "%-25s%-8s%-8s%-8s%-8s%-8s%-8s\n", Name, Weight, State, Inqueue, Pending, Running, Unknown) diff --git a/pkg/cli/queue/operate.go b/pkg/cli/queue/operate.go index 27c8f7bc02c..109fecde4b5 100644 --- a/pkg/cli/queue/operate.go +++ b/pkg/cli/queue/operate.go @@ -49,7 +49,7 @@ type operateFlags struct { var operateQueueFlags = &operateFlags{} -// InitOperateFlags is used to init all flags during queue operating +// InitOperateFlags is used to init all flags during queue operating. func InitOperateFlags(cmd *cobra.Command) { initFlags(cmd, &operateQueueFlags.commonFlags) @@ -59,7 +59,7 @@ func InitOperateFlags(cmd *cobra.Command) { "operate action to queue, valid actions are open, close, update") } -// OperateQueue operates queue +// OperateQueue operates queue. func OperateQueue() error { config, err := buildConfig(operateQueueFlags.Master, operateQueueFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/util/util.go b/pkg/cli/util/util.go index 8eda8bb2803..15ae87467a8 100644 --- a/pkg/cli/util/util.go +++ b/pkg/cli/util/util.go @@ -36,13 +36,13 @@ import ( "volcano.sh/volcano/pkg/client/clientset/versioned" ) -// CommonFlags are the flags that most command lines have +// CommonFlags are the flags that most command lines have. type CommonFlags struct { Master string Kubeconfig string } -// InitFlags initializes the common flags for most command lines +// InitFlags initializes the common flags for most command lines. func InitFlags(cmd *cobra.Command, cf *CommonFlags) { cmd.Flags().StringVarP(&cf.Master, "master", "s", "", "the address of apiserver") @@ -55,7 +55,7 @@ func InitFlags(cmd *cobra.Command, cf *CommonFlags) { cmd.Flags().StringVarP(&cf.Kubeconfig, "kubeconfig", "k", kubeConfFile, "(optional) absolute path to the kubeconfig file") } -// HomeDir gets the env $HOME +// HomeDir gets the env $HOME. func HomeDir() string { if h := os.Getenv("HOME"); h != "" { return h @@ -63,7 +63,7 @@ func HomeDir() string { return os.Getenv("USERPROFILE") // windows } -// BuildConfig builds the configure file for command lines +// BuildConfig builds the configure file for command lines. func BuildConfig(master, kubeconfig string) (*rest.Config, error) { return clientcmd.BuildConfigFromFlags(master, kubeconfig) } @@ -92,7 +92,7 @@ func PopulateResourceListV1(spec string) (v1.ResourceList, error) { return result, nil } -// CreateJobCommand executes a command such as resume/suspend +// CreateJobCommand executes a command such as resume/suspend. func CreateJobCommand(config *rest.Config, ns, name string, action vcbus.Action) error { jobClient := versioned.NewForConfigOrDie(config) job, err := jobClient.BatchV1alpha1().Jobs(ns).Get(name, metav1.GetOptions{}) @@ -121,7 +121,7 @@ func CreateJobCommand(config *rest.Config, ns, name string, action vcbus.Action) return nil } -// TranslateTimestampSince translates the time stamp +// TranslateTimestampSince translates the time stamp. func TranslateTimestampSince(timestamp metav1.Time) string { if timestamp.IsZero() { return "" @@ -129,7 +129,7 @@ func TranslateTimestampSince(timestamp metav1.Time) string { return HumanDuration(time.Since(timestamp.Time)) } -// HumanDuration translate time.Duration to human readable time string +// HumanDuration translate time.Duration to human readable time string. func HumanDuration(d time.Duration) string { // Allow deviation no more than 2 seconds(excluded) to tolerate machine time // inconsistence, it can be considered as almost now. 
diff --git a/pkg/cli/vcancel/cancel.go b/pkg/cli/vcancel/cancel.go index 1bebad6c069..bddeadd6da6 100644 --- a/pkg/cli/vcancel/cancel.go +++ b/pkg/cli/vcancel/cancel.go @@ -36,7 +36,7 @@ type cancelFlags struct { var cancelJobFlags = &cancelFlags{} -// InitCancelFlags init the cancel command flags +// InitCancelFlags init the cancel command flags. func InitCancelFlags(cmd *cobra.Command) { util.InitFlags(cmd, &cancelJobFlags.CommonFlags) @@ -44,7 +44,7 @@ func InitCancelFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&cancelJobFlags.JobName, "name", "n", "", "the name of job") } -// CancelJob cancel the job +// CancelJob cancel the job. func CancelJob() error { config, err := util.BuildConfig(cancelJobFlags.Master, cancelJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/vjobs/view.go b/pkg/cli/vjobs/view.go index 4085386dd26..acfc5069eb9 100644 --- a/pkg/cli/vjobs/view.go +++ b/pkg/cli/vjobs/view.go @@ -89,7 +89,7 @@ const ( var viewJobFlags = &viewFlags{} -// InitViewFlags init the view command flags +// InitViewFlags init the view command flags. func InitViewFlags(cmd *cobra.Command) { util.InitFlags(cmd, &viewJobFlags.CommonFlags) @@ -100,7 +100,7 @@ func InitViewFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&viewJobFlags.selector, "selector", "", "", "fuzzy matching jobName") } -// ViewJob gives full details of the job +// ViewJob gives full details of the job. func ViewJob() error { config, err := util.BuildConfig(viewJobFlags.Master, viewJobFlags.Kubeconfig) if err != nil { @@ -125,7 +125,7 @@ func ViewJob() error { return nil } -// PrintJobInfo print the job detailed info into writer +// PrintJobInfo print the job detailed info into writer. func PrintJobInfo(job *v1alpha1.Job, writer io.Writer) { WriteLine(writer, Level0, "Name: \t%s\n", job.Name) WriteLine(writer, Level0, "Namespace: \t%s\n", job.Namespace) @@ -239,7 +239,7 @@ func PrintJobInfo(job *v1alpha1.Job, writer io.Writer) { } } -// PrintEvents print event info to writer +// PrintEvents print event info to writer. func PrintEvents(events []coreV1.Event, writer io.Writer) { if len(events) > 0 { WriteLine(writer, Level0, "%s:\n%-15s\t%-40s\t%-30s\t%-40s\t%s\n", "Events", "Type", "Reason", "Age", "Form", "Message") @@ -269,7 +269,7 @@ func PrintEvents(events []coreV1.Event, writer io.Writer) { } -// GetEvents get the job event by config +// GetEvents get the job event by config. func GetEvents(config *rest.Config, job *v1alpha1.Job) []coreV1.Event { kubernetes, err := kubernetes.NewForConfig(config) if err != nil { @@ -286,7 +286,7 @@ func GetEvents(config *rest.Config, job *v1alpha1.Job) []coreV1.Event { return jobEvents } -// WriteLine write lines with specified indent +// WriteLine write lines with specified indent. func WriteLine(writer io.Writer, spaces int, content string, params ...interface{}) { prefix := "" for i := 0; i < spaces; i++ { @@ -295,7 +295,7 @@ func WriteLine(writer io.Writer, spaces int, content string, params ...interface fmt.Fprintf(writer, prefix+content, params...) } -// ListJobs lists all jobs details +// ListJobs lists all jobs details. func ListJobs() error { config, err := util.BuildConfig(viewJobFlags.Master, viewJobFlags.Kubeconfig) if err != nil { @@ -319,7 +319,7 @@ func ListJobs() error { return nil } -// PrintJobs prints all jobs details +// PrintJobs prints all jobs details. 
func PrintJobs(jobs *v1alpha1.JobList, writer io.Writer) { maxLenInfo := getMaxLen(jobs) diff --git a/pkg/cli/vqueues/get.go b/pkg/cli/vqueues/get.go index 2f9cc0b12e9..ef137b7e3a7 100644 --- a/pkg/cli/vqueues/get.go +++ b/pkg/cli/vqueues/get.go @@ -61,7 +61,7 @@ const ( var getQueueFlags = &getFlags{} -// InitGetFlags is used to init all flags +// InitGetFlags is used to init all flags. func InitGetFlags(cmd *cobra.Command) { util.InitFlags(cmd, &getQueueFlags.CommonFlags) @@ -69,7 +69,7 @@ func InitGetFlags(cmd *cobra.Command) { } -// ListQueue lists all the queue +// ListQueue lists all the queue. func ListQueue() error { config, err := util.BuildConfig(getQueueFlags.Master, getQueueFlags.Kubeconfig) if err != nil { @@ -91,7 +91,7 @@ func ListQueue() error { return nil } -// PrintQueues prints queue information +// PrintQueues prints queue information. func PrintQueues(queues *v1beta1.QueueList, writer io.Writer) { _, err := fmt.Fprintf(writer, "%-25s%-8s%-8s%-8s%-8s%-8s%-8s\n", Name, Weight, State, Inqueue, Pending, Running, Unknown) @@ -109,7 +109,7 @@ func PrintQueues(queues *v1beta1.QueueList, writer io.Writer) { } -// GetQueue gets a queue +// GetQueue gets a queue. func GetQueue() error { config, err := util.BuildConfig(getQueueFlags.Master, getQueueFlags.Kubeconfig) if err != nil { @@ -132,7 +132,7 @@ func GetQueue() error { return nil } -// PrintQueue prints queue information +// PrintQueue prints queue information. func PrintQueue(queue *v1beta1.Queue, writer io.Writer) { _, err := fmt.Fprintf(writer, "%-25s%-8s%-8s%-8s%-8s%-8s%-8s\n", Name, Weight, State, Inqueue, Pending, Running, Unknown) diff --git a/pkg/cli/vresume/resume.go b/pkg/cli/vresume/resume.go index abe0a1b0b64..723fca555d6 100644 --- a/pkg/cli/vresume/resume.go +++ b/pkg/cli/vresume/resume.go @@ -34,7 +34,7 @@ type resumeFlags struct { var resumeJobFlags = &resumeFlags{} -// InitResumeFlags init resume command flags +// InitResumeFlags init resume command flags. func InitResumeFlags(cmd *cobra.Command) { util.InitFlags(cmd, &resumeJobFlags.CommonFlags) @@ -42,7 +42,7 @@ func InitResumeFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&resumeJobFlags.JobName, "name", "n", "", "the name of job") } -// ResumeJob resumes the job +// ResumeJob resumes the job. func ResumeJob() error { config, err := util.BuildConfig(resumeJobFlags.Master, resumeJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/vsub/run.go b/pkg/cli/vsub/run.go index 8314b896c86..3e73fc1c150 100644 --- a/pkg/cli/vsub/run.go +++ b/pkg/cli/vsub/run.go @@ -64,7 +64,7 @@ const ( defaultJobNamespace = "default" ) -// InitRunFlags init the run flags +// InitRunFlags init the run flags. func InitRunFlags(cmd *cobra.Command) { util.InitFlags(cmd, &launchJobFlags.CommonFlags) @@ -121,7 +121,7 @@ func setDefaultArgs() { var jobName = "job.volcano.sh" -// RunJob creates the job +// RunJob creates the job. func RunJob() error { config, err := util.BuildConfig(launchJobFlags.Master, launchJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/cli/vsuspend/suspend.go b/pkg/cli/vsuspend/suspend.go index 1779cfa648e..2fdd43af421 100644 --- a/pkg/cli/vsuspend/suspend.go +++ b/pkg/cli/vsuspend/suspend.go @@ -34,7 +34,7 @@ type suspendFlags struct { var suspendJobFlags = &suspendFlags{} -// InitSuspendFlags init suspend related flags +// InitSuspendFlags init suspend related flags. 
func InitSuspendFlags(cmd *cobra.Command) { util.InitFlags(cmd, &suspendJobFlags.CommonFlags) @@ -42,7 +42,7 @@ func InitSuspendFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&suspendJobFlags.JobName, "name", "n", "", "the name of job") } -// SuspendJob suspends the job +// SuspendJob suspends the job. func SuspendJob() error { config, err := util.BuildConfig(suspendJobFlags.Master, suspendJobFlags.Kubeconfig) if err != nil { diff --git a/pkg/client/clientset/versioned/typed/bus/v1alpha1/command.go b/pkg/client/clientset/versioned/typed/bus/v1alpha1/command.go index 52fdd0793f7..a0618498532 100644 --- a/pkg/client/clientset/versioned/typed/bus/v1alpha1/command.go +++ b/pkg/client/clientset/versioned/typed/bus/v1alpha1/command.go @@ -48,13 +48,13 @@ type CommandInterface interface { CommandExpansion } -// commands implements CommandInterface +// commands implements CommandInterface. type commands struct { client rest.Interface ns string } -// newCommands returns a Commands +// newCommands returns a Commands. func newCommands(c *BusV1alpha1Client, namespace string) *commands { return &commands{ client: c.RESTClient(), diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 9e76ae288ea..ea9a407ff70 100644 --- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -30,7 +30,7 @@ import ( // NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle. type SharedInformerFactory interface { Start(stopCh <-chan struct{}) InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer diff --git a/pkg/controllers/apis/job_info.go b/pkg/controllers/apis/job_info.go index 88f41223af3..f68cab47a8a 100644 --- a/pkg/controllers/apis/job_info.go +++ b/pkg/controllers/apis/job_info.go @@ -24,7 +24,7 @@ import ( batch "volcano.sh/volcano/pkg/apis/batch/v1alpha1" ) -//JobInfo struct +//JobInfo struct. type JobInfo struct { Namespace string Name string @@ -33,7 +33,7 @@ type JobInfo struct { Pods map[string]map[string]*v1.Pod } -//Clone function clones the k8s pod values to the JobInfo struct +//Clone function clones the k8s pod values to the JobInfo struct. func (ji *JobInfo) Clone() *JobInfo { job := &JobInfo{ Namespace: ji.Namespace, @@ -53,7 +53,7 @@ func (ji *JobInfo) Clone() *JobInfo { return job } -//SetJob sets the volcano jobs values to the JobInfo struct +//SetJob sets the volcano jobs values to the JobInfo struct. func (ji *JobInfo) SetJob(job *batch.Job) { ji.Name = job.Name ji.Namespace = job.Namespace @@ -61,7 +61,7 @@ func (ji *JobInfo) SetJob(job *batch.Job) { } //AddPod adds the k8s pod object values to the Pods field -//of JobStruct if it doesn't exist. Otherwise it throws error +//of JobStruct if it doesn't exist. Otherwise it throws error. 
func (ji *JobInfo) AddPod(pod *v1.Pod) error { taskName, found := pod.Annotations[batch.TaskSpecKey] if !found { @@ -86,7 +86,7 @@ func (ji *JobInfo) AddPod(pod *v1.Pod) error { return nil } -//UpdatePod updates the k8s pod object values to the existing pod +//UpdatePod updates the k8s pod object values to the existing pod. func (ji *JobInfo) UpdatePod(pod *v1.Pod) error { taskName, found := pod.Annotations[batch.TaskSpecKey] if !found { @@ -111,7 +111,7 @@ func (ji *JobInfo) UpdatePod(pod *v1.Pod) error { return nil } -//DeletePod deletes the given k8s pod from the JobInfo struct +//DeletePod deletes the given k8s pod from the JobInfo struct. func (ji *JobInfo) DeletePod(pod *v1.Pod) error { taskName, found := pod.Annotations[batch.TaskSpecKey] if !found { diff --git a/pkg/controllers/apis/request.go b/pkg/controllers/apis/request.go index 7e741d77215..6254b21918e 100644 --- a/pkg/controllers/apis/request.go +++ b/pkg/controllers/apis/request.go @@ -21,7 +21,7 @@ import ( "volcano.sh/volcano/pkg/apis/bus/v1alpha1" ) -//Request struct +//Request struct. type Request struct { Namespace string JobName string @@ -34,7 +34,7 @@ type Request struct { JobVersion int32 } -//String function returns the request in string format +// String function returns the request in string format. func (r Request) String() string { return fmt.Sprintf( "Queue: %s, Job: %s/%s, Task:%s, Event:%s, ExitCode:%d, Action:%s, JobVersion: %d", diff --git a/pkg/controllers/cache/cache.go b/pkg/controllers/cache/cache.go index c2d45eaa760..4d56ff3b8cf 100644 --- a/pkg/controllers/cache/cache.go +++ b/pkg/controllers/cache/cache.go @@ -43,17 +43,17 @@ func keyFn(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } -//JobKeyByName gets the key for the job name +//JobKeyByName gets the key for the job name. func JobKeyByName(namespace string, name string) string { return keyFn(namespace, name) } -//JobKeyByReq gets the key for the job request +//JobKeyByReq gets the key for the job request. func JobKeyByReq(req *apis.Request) string { return keyFn(req.Namespace, req.JobName) } -//JobKey gets the "ns"/"name" format of the given job +//JobKey gets the "ns"/"name" format of the given job. func JobKey(job *v1alpha1.Job) string { return keyFn(job.Namespace, job.Name) } @@ -72,7 +72,7 @@ func jobKeyOfPod(pod *v1.Pod) (string, error) { return keyFn(pod.Namespace, jobName), nil } -//New gets the job Cache +// New gets the job Cache. func New() Cache { queue := workqueue.NewMaxOfRateLimiter( workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 180*time.Second), diff --git a/pkg/controllers/cache/interface.go b/pkg/controllers/cache/interface.go index 6730ef6177b..60c83453a46 100644 --- a/pkg/controllers/cache/interface.go +++ b/pkg/controllers/cache/interface.go @@ -23,7 +23,7 @@ import ( "volcano.sh/volcano/pkg/controllers/apis" ) -// Cache Interface +// Cache Interface. type Cache interface { Run(stopCh <-chan struct{}) diff --git a/pkg/controllers/garbagecollector/garbagecollector.go b/pkg/controllers/garbagecollector/garbagecollector.go index 187c978ff50..06ce9bdd134 100644 --- a/pkg/controllers/garbagecollector/garbagecollector.go +++ b/pkg/controllers/garbagecollector/garbagecollector.go @@ -56,7 +56,7 @@ type GarbageCollector struct { queue workqueue.RateLimitingInterface } -// NewGarbageCollector creates an instance of GarbageCollector +// NewGarbageCollector creates an instance of GarbageCollector. 
func NewGarbageCollector(vkClient vcclientset.Interface) *GarbageCollector { jobInformer := informerfactory.NewSharedInformerFactory(vkClient, 0).Batch().V1alpha1().Jobs() diff --git a/pkg/controllers/job/constant.go b/pkg/controllers/job/constant.go index a674d139eb0..2c9fe5e07fa 100644 --- a/pkg/controllers/job/constant.go +++ b/pkg/controllers/job/constant.go @@ -16,7 +16,7 @@ limitations under the License. package job -// Reasons for pod events +// Reasons for pod events. const ( // FailedCreatePodReason is added in an event and in a replica set condition // when a pod for a replica set is failed to be created. diff --git a/pkg/controllers/job/helpers/helpers.go b/pkg/controllers/job/helpers/helpers.go index 207c164c45c..ec9b72c6d9e 100644 --- a/pkg/controllers/job/helpers/helpers.go +++ b/pkg/controllers/job/helpers/helpers.go @@ -34,7 +34,7 @@ const ( persistentVolumeClaimFmt = "%s-pvc-%s" ) -// GetTaskIndex returns task Index +// GetTaskIndex returns task Index. func GetTaskIndex(pod *v1.Pod) string { num := strings.Split(pod.Name, "-") if len(num) >= 3 { @@ -44,12 +44,12 @@ func GetTaskIndex(pod *v1.Pod) string { return "" } -// MakePodName creates pod name +// MakePodName creates pod name. func MakePodName(jobName string, taskName string, index int) string { return fmt.Sprintf(PodNameFmt, jobName, taskName, index) } -// GenRandomStr generate random str with specified length l +// GenRandomStr generate random str with specified length l. func GenRandomStr(l int) string { str := "0123456789abcdefghijklmnopqrstuvwxyz" bytes := []byte(str) @@ -61,12 +61,12 @@ func GenRandomStr(l int) string { return string(result) } -// GenPVCName generates pvc name with job name +// GenPVCName generates pvc name with job name. func GenPVCName(jobName string) string { return fmt.Sprintf(persistentVolumeClaimFmt, jobName, GenRandomStr(12)) } -// GetJobKeyByReq gets the key for the job request +// GetJobKeyByReq gets the key for the job request. func GetJobKeyByReq(req *apis.Request) string { return fmt.Sprintf("%s/%s", req.Namespace, req.JobName) } diff --git a/pkg/controllers/job/job_controller.go b/pkg/controllers/job/job_controller.go index 446b978e6a0..2f307e26c3a 100644 --- a/pkg/controllers/job/job_controller.go +++ b/pkg/controllers/job/job_controller.go @@ -63,7 +63,7 @@ const ( maxRetries = 15 ) -// Controller the Job Controller type +// Controller the Job Controller type. type Controller struct { kubeClient kubernetes.Interface vcClient vcclientset.Interface @@ -114,7 +114,7 @@ type Controller struct { workers uint32 } -// NewJobController create new Job Controller +// NewJobController create new Job Controller. func NewJobController( kubeClient kubernetes.Interface, vcClient vcclientset.Interface, @@ -219,7 +219,7 @@ func NewJobController( return cc } -// Run start JobController +// Run start JobController. func (cc *Controller) Run(stopCh <-chan struct{}) { go cc.jobInformer.Informer().Run(stopCh) diff --git a/pkg/controllers/job/job_controller_util.go b/pkg/controllers/job/job_controller_util.go index 115f61a43bb..c33294d462b 100644 --- a/pkg/controllers/job/job_controller_util.go +++ b/pkg/controllers/job/job_controller_util.go @@ -32,7 +32,7 @@ import ( jobhelpers "volcano.sh/volcano/pkg/controllers/job/helpers" ) -//MakePodName append podname,jobname,taskName and index and returns the string +// MakePodName append podname,jobname,taskName and index and returns the string. 
func MakePodName(jobName string, taskName string, index int) string { return fmt.Sprintf(jobhelpers.PodNameFmt, jobName, taskName, index) } @@ -203,14 +203,14 @@ func addResourceList(list, req, limit v1.ResourceList) { } } -//TaskPriority structure +// TaskPriority structure. type TaskPriority struct { priority int32 batch.TaskSpec } -//TasksPriority is a slice of TaskPriority +// TasksPriority is a slice of TaskPriority. type TasksPriority []TaskPriority func (p TasksPriority) Len() int { return len(p) } diff --git a/pkg/controllers/job/plugins/env/env.go b/pkg/controllers/job/plugins/env/env.go index ec4746691e5..16d967a1469 100644 --- a/pkg/controllers/job/plugins/env/env.go +++ b/pkg/controllers/job/plugins/env/env.go @@ -31,7 +31,7 @@ type envPlugin struct { Clientset pluginsinterface.PluginClientset } -// New creates env plugin +// New creates env plugin. func New(client pluginsinterface.PluginClientset, arguments []string) pluginsinterface.PluginInterface { envPlugin := envPlugin{pluginArguments: arguments, Clientset: client} diff --git a/pkg/controllers/job/plugins/factory.go b/pkg/controllers/job/plugins/factory.go index 80307dab225..b8425c45fdf 100644 --- a/pkg/controllers/job/plugins/factory.go +++ b/pkg/controllers/job/plugins/factory.go @@ -33,13 +33,13 @@ func init() { var pluginMutex sync.Mutex -// Plugin management +// Plugin management. var pluginBuilders = map[string]PluginBuilder{} -// PluginBuilder func prototype +// PluginBuilder func prototype. type PluginBuilder func(pluginsinterface.PluginClientset, []string) pluginsinterface.PluginInterface -// RegisterPluginBuilder register plugin builders +// RegisterPluginBuilder register plugin builders. func RegisterPluginBuilder(name string, pc PluginBuilder) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -47,7 +47,7 @@ func RegisterPluginBuilder(name string, pc PluginBuilder) { pluginBuilders[name] = pc } -// GetPluginBuilder returns plugin builder for a given plugin name +// GetPluginBuilder returns plugin builder for a given plugin name. func GetPluginBuilder(name string) (PluginBuilder, bool) { pluginMutex.Lock() defer pluginMutex.Unlock() diff --git a/pkg/controllers/job/plugins/interface/interface.go b/pkg/controllers/job/plugins/interface/interface.go index 71a4fcf770e..822ad981009 100644 --- a/pkg/controllers/job/plugins/interface/interface.go +++ b/pkg/controllers/job/plugins/interface/interface.go @@ -23,12 +23,12 @@ import ( vcbatch "volcano.sh/volcano/pkg/apis/batch/v1alpha1" ) -// PluginClientset clientset +// PluginClientset clientset. type PluginClientset struct { KubeClients kubernetes.Interface } -// PluginInterface interface +// PluginInterface interface. type PluginInterface interface { // The unique name of Plugin. 
Name() string diff --git a/pkg/controllers/job/plugins/ssh/ssh.go b/pkg/controllers/job/plugins/ssh/ssh.go index 29e995ec292..567f9742205 100644 --- a/pkg/controllers/job/plugins/ssh/ssh.go +++ b/pkg/controllers/job/plugins/ssh/ssh.go @@ -46,7 +46,7 @@ type sshPlugin struct { sshKeyFilePath string } -// New creates ssh plugin +// New creates ssh plugin. func New(client pluginsinterface.PluginClientset, arguments []string) pluginsinterface.PluginInterface { sshPlugin := sshPlugin{ pluginArguments: arguments, diff --git a/pkg/controllers/job/plugins/svc/svc.go b/pkg/controllers/job/plugins/svc/svc.go index 4d01a04c910..424304f4de4 100644 --- a/pkg/controllers/job/plugins/svc/svc.go +++ b/pkg/controllers/job/plugins/svc/svc.go @@ -47,7 +47,7 @@ type servicePlugin struct { disableNetworkPolicy bool } -// New creates service plugin +// New creates service plugin. func New(client pluginsinterface.PluginClientset, arguments []string) pluginsinterface.PluginInterface { servicePlugin := servicePlugin{pluginArguments: arguments, Clientset: client} @@ -303,7 +303,7 @@ func (sp *servicePlugin) cmName(job *batch.Job) string { return fmt.Sprintf("%s-%s", job.Name, sp.Name()) } -// GenerateHosts generates hostnames per task +// GenerateHosts generates hostnames per task. func GenerateHosts(job *batch.Job) map[string]string { hostFile := make(map[string]string, len(job.Spec.Tasks)) diff --git a/pkg/controllers/job/state/factory.go b/pkg/controllers/job/state/factory.go index c5b5e4ef596..420817ae4cc 100644 --- a/pkg/controllers/job/state/factory.go +++ b/pkg/controllers/job/state/factory.go @@ -36,10 +36,10 @@ type ActionFn func(job *apis.JobInfo, fn UpdateStatusFn) error //KillActionFn kill all Pods of Job with phase not in podRetainPhase. type KillActionFn func(job *apis.JobInfo, podRetainPhase PhaseMap, fn UpdateStatusFn) error -//PodRetainPhaseNone stores no phase +//PodRetainPhaseNone stores no phase. var PodRetainPhaseNone = PhaseMap{} -// PodRetainPhaseSoft stores PodSucceeded and PodFailed Phase +// PodRetainPhaseSoft stores PodSucceeded and PodFailed Phase. var PodRetainPhaseSoft = PhaseMap{ v1.PodSucceeded: {}, v1.PodFailed: {}, @@ -52,13 +52,13 @@ var ( KillJob KillActionFn ) -//State interface +//State interface. type State interface { // Execute executes the actions based on current state. Execute(act v1alpha1.Action) error } -// NewState gets the state from the volcano job Phase +// NewState gets the state from the volcano job Phase. func NewState(jobInfo *apis.JobInfo) State { job := jobInfo.Job switch job.Status.State.Phase { diff --git a/pkg/controllers/job/state/util.go b/pkg/controllers/job/state/util.go index c37d7500823..0845d0c98b3 100644 --- a/pkg/controllers/job/state/util.go +++ b/pkg/controllers/job/state/util.go @@ -23,7 +23,7 @@ import ( // DefaultMaxRetry is the default number of retries. const DefaultMaxRetry int32 = 3 -// TotalTasks returns number of tasks in a given volcano job +// TotalTasks returns number of tasks in a given volcano job. func TotalTasks(job *vcbatch.Job) int32 { var rep int32 diff --git a/pkg/controllers/podgroup/pg_controller.go b/pkg/controllers/podgroup/pg_controller.go index 501cf976d63..50dcb3d99c7 100644 --- a/pkg/controllers/podgroup/pg_controller.go +++ b/pkg/controllers/podgroup/pg_controller.go @@ -34,7 +34,7 @@ import ( schedulinglister "volcano.sh/volcano/pkg/client/listers/scheduling/v1beta1" ) -// Controller the Podgroup Controller type +// Controller the Podgroup Controller type. 
type Controller struct { kubeClient kubernetes.Interface vcClient vcclientset.Interface @@ -53,7 +53,7 @@ type Controller struct { queue workqueue.RateLimitingInterface } -// NewPodgroupController create new Podgroup Controller +// NewPodgroupController create new Podgroup Controller. func NewPodgroupController( kubeClient kubernetes.Interface, vcClient vcclientset.Interface, @@ -97,7 +97,7 @@ func NewPodgroupController( return cc } -// Run start NewPodgroupController +// Run start NewPodgroupController. func (cc *Controller) Run(stopCh <-chan struct{}) { go cc.podInformer.Informer().Run(stopCh) go cc.pgInformer.Informer().Run(stopCh) diff --git a/pkg/controllers/queue/queue_controller.go b/pkg/controllers/queue/queue_controller.go index 369d15c011e..0504ce55dc3 100644 --- a/pkg/controllers/queue/queue_controller.go +++ b/pkg/controllers/queue/queue_controller.go @@ -89,7 +89,7 @@ type Controller struct { recorder record.EventRecorder } -// NewQueueController creates a QueueController +// NewQueueController creates a QueueController. func NewQueueController( kubeClient kubernetes.Interface, vcClient vcclientset.Interface, @@ -165,7 +165,7 @@ func NewQueueController( return c } -// Run starts QueueController +// Run starts QueueController. func (c *Controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() diff --git a/pkg/controllers/queue/queue_controller_util.go b/pkg/controllers/queue/queue_controller_util.go index 17140f6bfd2..ad093666b8d 100644 --- a/pkg/controllers/queue/queue_controller_util.go +++ b/pkg/controllers/queue/queue_controller_util.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// IsQueueReference return if ownerReference is Queue Kind +// IsQueueReference return if ownerReference is Queue Kind. func IsQueueReference(ref *metav1.OwnerReference) bool { if ref == nil { return false diff --git a/pkg/controllers/queue/state/factory.go b/pkg/controllers/queue/state/factory.go index fdf03bedc34..0d397653b97 100644 --- a/pkg/controllers/queue/state/factory.go +++ b/pkg/controllers/queue/state/factory.go @@ -21,13 +21,13 @@ import ( "volcano.sh/volcano/pkg/apis/scheduling/v1beta1" ) -// State interface +// State interface. type State interface { // Execute executes the actions based on current state. Execute(action v1alpha1.Action) error } -// UpdateQueueStatusFn updates the queue status +// UpdateQueueStatusFn updates the queue status. type UpdateQueueStatusFn func(status *v1beta1.QueueStatus, podGroupList []string) // QueueActionFn will open, close or sync queue. @@ -42,7 +42,7 @@ var ( CloseQueue QueueActionFn ) -// NewState gets the state from queue status +// NewState gets the state from queue status. func NewState(queue *v1beta1.Queue) State { switch queue.Status.State { case "", v1beta1.QueueStateOpen: diff --git a/pkg/kube/config.go b/pkg/kube/config.go index 568f29e535a..cde0ac72f90 100644 --- a/pkg/kube/config.go +++ b/pkg/kube/config.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -// ClientOptions used to build kube rest config +// ClientOptions used to build kube rest config. type ClientOptions struct { Master string KubeConfig string @@ -29,7 +29,7 @@ type ClientOptions struct { Burst int } -// BuildConfig builds kube rest config with the given options +// BuildConfig builds kube rest config with the given options. 
func BuildConfig(opt ClientOptions) (*rest.Config, error) { var cfg *rest.Config var err error diff --git a/pkg/scheduler/api/helpers.go b/pkg/scheduler/api/helpers.go index 7509da2981c..243e9bba8f6 100644 --- a/pkg/scheduler/api/helpers.go +++ b/pkg/scheduler/api/helpers.go @@ -60,7 +60,7 @@ func getTaskStatus(pod *v1.Pod) TaskStatus { return Unknown } -// AllocatedStatus checks whether the tasks has AllocatedStatus +// AllocatedStatus checks whether the tasks has AllocatedStatus. func AllocatedStatus(status TaskStatus) bool { switch status { case Bound, Binding, Running, Allocated: @@ -70,7 +70,7 @@ func AllocatedStatus(status TaskStatus) bool { } } -// MergeErrors is used to merge multiple errors into single error +// MergeErrors is used to merge multiple errors into single error. func MergeErrors(errs ...error) error { msg := "errors: " diff --git a/pkg/scheduler/api/helpers/helpers.go b/pkg/scheduler/api/helpers/helpers.go index f94b22ec820..3042c90a4e8 100644 --- a/pkg/scheduler/api/helpers/helpers.go +++ b/pkg/scheduler/api/helpers/helpers.go @@ -24,7 +24,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/api" ) -// Min is used to find the min of two resource types +// Min is used to find the min of two resource types. func Min(l, r *api.Resource) *api.Resource { res := &api.Resource{} @@ -43,7 +43,7 @@ func Min(l, r *api.Resource) *api.Resource { return res } -// Share is used to determine the share +// Share is used to determine the share. func Share(l, r float64) float64 { var share float64 if r == 0 { diff --git a/pkg/scheduler/api/job_info.go b/pkg/scheduler/api/job_info.go index bd8cadaad8d..0317e00cbf4 100644 --- a/pkg/scheduler/api/job_info.go +++ b/pkg/scheduler/api/job_info.go @@ -29,10 +29,10 @@ import ( "volcano.sh/volcano/pkg/apis/scheduling/v1beta1" ) -// TaskID is UID type for Task +// TaskID is UID type for Task. type TaskID types.UID -// TaskInfo will have all infos about the task +// TaskInfo will have all infos about the task. type TaskInfo struct { UID TaskID Job JobID @@ -63,7 +63,7 @@ func getJobID(pod *v1.Pod) JobID { return "" } -// NewTaskInfo creates new taskInfo object for a Pod +// NewTaskInfo creates new taskInfo object for a Pod. func NewTaskInfo(pod *v1.Pod) *TaskInfo { req := GetPodResourceWithoutInitContainers(pod) initResreq := GetPodResourceRequest(pod) @@ -90,7 +90,7 @@ func NewTaskInfo(pod *v1.Pod) *TaskInfo { return ti } -// Clone is used for cloning a task +// Clone is used for cloning a task. func (ti *TaskInfo) Clone() *TaskInfo { return &TaskInfo{ UID: ti.UID, @@ -107,7 +107,7 @@ func (ti *TaskInfo) Clone() *TaskInfo { } } -// String returns the taskInfo details in a string +// String returns the taskInfo details in a string. func (ti TaskInfo) String() string { return fmt.Sprintf("Task (%v:%v/%v): job %v, status %v, pri %v, resreq %v", ti.UID, ti.Namespace, ti.Name, ti.Job, ti.Status, ti.Priority, ti.Resreq) @@ -118,10 +118,10 @@ type JobID types.UID type tasksMap map[TaskID]*TaskInfo -// NodeResourceMap stores resource in a node +// NodeResourceMap stores resource in a node. type NodeResourceMap map[string]*Resource -// JobInfo will have all info of a Job +// JobInfo will have all info of a Job. type JobInfo struct { UID JobID @@ -150,7 +150,7 @@ type JobInfo struct { PodGroup *PodGroup } -// NewJobInfo creates a new jobInfo for set of tasks +// NewJobInfo creates a new jobInfo for set of tasks. 
func NewJobInfo(uid JobID, tasks ...*TaskInfo) *JobInfo { job := &JobInfo{ UID: uid, @@ -173,12 +173,12 @@ func NewJobInfo(uid JobID, tasks ...*TaskInfo) *JobInfo { return job } -// UnsetPodGroup removes podGroup details from a job +// UnsetPodGroup removes podGroup details from a job. func (ji *JobInfo) UnsetPodGroup() { ji.PodGroup = nil } -// SetPodGroup sets podGroup details to a job +// SetPodGroup sets podGroup details to a job. func (ji *JobInfo) SetPodGroup(pg *PodGroup) { ji.Name = pg.Name ji.Namespace = pg.Namespace @@ -197,7 +197,7 @@ func (ji *JobInfo) addTaskIndex(ti *TaskInfo) { ji.TaskStatusIndex[ti.Status][ti.UID] = ti } -// AddTaskInfo is used to add a task to a job +// AddTaskInfo is used to add a task to a job. func (ji *JobInfo) AddTaskInfo(ti *TaskInfo) { ji.Tasks[ti.UID] = ti ji.addTaskIndex(ti) @@ -240,7 +240,7 @@ func (ji *JobInfo) deleteTaskIndex(ti *TaskInfo) { } } -// DeleteTaskInfo is used to delete a task from a job +// DeleteTaskInfo is used to delete a task from a job. func (ji *JobInfo) DeleteTaskInfo(ti *TaskInfo) error { if task, found := ji.Tasks[ti.UID]; found { ji.TotalRequest.Sub(task.Resreq) @@ -259,7 +259,7 @@ func (ji *JobInfo) DeleteTaskInfo(ti *TaskInfo) error { ti.Namespace, ti.Name, ji.Namespace, ji.Name) } -// Clone is used to clone a jobInfo object +// Clone is used to clone a jobInfo object. func (ji *JobInfo) Clone() *JobInfo { info := &JobInfo{ UID: ji.UID, @@ -290,7 +290,7 @@ func (ji *JobInfo) Clone() *JobInfo { return info } -// String returns a jobInfo object in string format +// String returns a jobInfo object in string format. func (ji JobInfo) String() string { res := "" @@ -305,7 +305,7 @@ func (ji JobInfo) String() string { } // FitError returns detailed information on why a job's task failed to fit on -// each available node +// each available node. func (ji *JobInfo) FitError() string { reasons := make(map[string]int) for status, taskMap := range ji.TaskStatusIndex { @@ -374,14 +374,14 @@ func (ji *JobInfo) ValidTaskNum() int32 { return int32(occupied) } -// Ready returns whether job is ready for run +// Ready returns whether job is ready for run. func (ji *JobInfo) Ready() bool { occupied := ji.ReadyTaskNum() return occupied >= ji.MinAvailable } -// Pipelined returns whether the number of ready and pipelined task is enough +// Pipelined returns whether the number of ready and pipelined task is enough. func (ji *JobInfo) Pipelined() bool { occupied := ji.WaitingTaskNum() + ji.ReadyTaskNum() diff --git a/pkg/scheduler/api/namespace_info.go b/pkg/scheduler/api/namespace_info.go index 2642b4fb277..fd0f54fd708 100644 --- a/pkg/scheduler/api/namespace_info.go +++ b/pkg/scheduler/api/namespace_info.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog" ) -// NamespaceName is name of namespace +// NamespaceName is name of namespace. type NamespaceName string const ( @@ -34,7 +34,7 @@ const ( DefaultNamespaceWeight = 1 ) -// NamespaceInfo records information of namespace +// NamespaceInfo records information of namespace. type NamespaceInfo struct { // Name is the name of this namespace Name NamespaceName @@ -42,7 +42,7 @@ type NamespaceInfo struct { Weight int64 } -// GetWeight returns weight of a namespace, any invalid case would get default value +// GetWeight returns weight of a namespace, any invalid case would get default value. 
func (n *NamespaceInfo) GetWeight() int64 { if n == nil || n.Weight == 0 { return DefaultNamespaceWeight @@ -63,21 +63,21 @@ func quotaItemKeyFunc(obj interface{}) (string, error) { return item.name, nil } -// for big root heap +// for big root heap. func quotaItemLessFunc(a interface{}, b interface{}) bool { A := a.(*quotaItem) B := b.(*quotaItem) return A.weight > B.weight } -// NamespaceCollection will record all details about namespace +// NamespaceCollection will record all details about namespace. type NamespaceCollection struct { Name string quotaWeight *cache.Heap } -// NewNamespaceCollection creates new NamespaceCollection object to record all information about a namespace +// NewNamespaceCollection creates new NamespaceCollection object to record all information about a namespace. func NewNamespaceCollection(name string) *NamespaceCollection { n := &NamespaceCollection{ Name: name, @@ -115,17 +115,17 @@ func itemFromQuota(quota *v1.ResourceQuota) *quotaItem { return item } -// Update modify the registered information according quota object +// Update modify the registered information according quota object. func (n *NamespaceCollection) Update(quota *v1.ResourceQuota) { n.updateWeight(itemFromQuota(quota)) } -// Delete remove the registered information according quota object +// Delete remove the registered information according quota object. func (n *NamespaceCollection) Delete(quota *v1.ResourceQuota) { n.deleteWeight(itemFromQuota(quota)) } -// Snapshot will clone a NamespaceInfo without Heap according NamespaceCollection +// Snapshot will clone a NamespaceInfo without Heap according NamespaceCollection. func (n *NamespaceCollection) Snapshot() *NamespaceInfo { var weight int64 = DefaultNamespaceWeight diff --git a/pkg/scheduler/api/node_info.go b/pkg/scheduler/api/node_info.go index db5b93e0c06..ccefb5c7ad2 100644 --- a/pkg/scheduler/api/node_info.go +++ b/pkg/scheduler/api/node_info.go @@ -63,7 +63,7 @@ type NodeState struct { Reason string } -// NewNodeInfo is used to create new nodeInfo object +// NewNodeInfo is used to create new nodeInfo object. 
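The weight lookup above is deliberately nil-safe, which callers rely on when a namespace has no recorded quota; a tiny sketch (the namespace name and weight are illustrative):

package main

import (
	"fmt"

	"volcano.sh/volcano/pkg/scheduler/api"
)

func main() {
	// A nil or zero-weight NamespaceInfo falls back to DefaultNamespaceWeight.
	var missing *api.NamespaceInfo
	fmt.Println(missing.GetWeight()) // 1

	ns := &api.NamespaceInfo{Name: "team-a", Weight: 5}
	fmt.Println(ns.GetWeight()) // 5
}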
func NewNodeInfo(node *v1.Node) *NodeInfo { var ni *NodeInfo @@ -101,7 +101,7 @@ func NewNodeInfo(node *v1.Node) *NodeInfo { return ni } -// Clone used to clone nodeInfo Object + func (ni *NodeInfo) Clone() *NodeInfo { res := NewNodeInfo(ni.Node) @@ -112,7 +112,7 @@ func (ni *NodeInfo) Clone() *NodeInfo { return res } -// Ready returns whether node is ready for scheduling + func (ni *NodeInfo) Ready() bool { return ni.State.Phase == Ready } @@ -154,7 +154,7 @@ func (ni *NodeInfo) setNodeState(node *v1.Node) { } } -// SetNode sets kubernetes node object to nodeInfo object + func (ni *NodeInfo) SetNode(node *v1.Node) { ni.setNodeState(node) @@ -291,7 +291,7 @@ func (ni *NodeInfo) UpdateTask(ti *TaskInfo) error { return nil } -// String returns nodeInfo details in string format + func (ni NodeInfo) String() string { tasks := "" @@ -306,7 +306,7 @@ func (ni NodeInfo) String() string { } -// Pods returns all pods running in that node + func (ni *NodeInfo) Pods() (pods []*v1.Pod) { for _, t := range ni.Tasks { pods = append(pods, t.Pod) diff --git a/pkg/scheduler/api/pod_info.go b/pkg/scheduler/api/pod_info.go index 56a94034bb9..fd22a570bd2 100644 --- a/pkg/scheduler/api/pod_info.go +++ b/pkg/scheduler/api/pod_info.go @@ -49,7 +49,7 @@ import ( // // Result: CPU: 3, Memory: 3G -// GetPodResourceRequest returns all the resource required for that pod + func GetPodResourceRequest(pod *v1.Pod) *Resource { result := GetPodResourceWithoutInitContainers(pod) diff --git a/pkg/scheduler/api/queue_info.go b/pkg/scheduler/api/queue_info.go index a969c57afe9..e4ae6da23ed 100644 --- a/pkg/scheduler/api/queue_info.go +++ b/pkg/scheduler/api/queue_info.go @@ -22,10 +22,10 @@ import ( "volcano.sh/volcano/pkg/apis/scheduling" ) -// QueueID is UID type, serves as unique ID for each queue + type QueueID types.UID -// QueueInfo will have all details about queue + type QueueInfo struct { UID QueueID Name string @@ -35,7 +35,7 @@ type QueueInfo struct { Queue *scheduling.Queue } -// NewQueueInfo creates new queueInfo object + func NewQueueInfo(queue *scheduling.Queue) *QueueInfo { return &QueueInfo{ UID: QueueID(queue.Name), @@ -47,7 +47,7 @@ func NewQueueInfo(queue *scheduling.Queue) *QueueInfo { } } -// Clone is used to clone queueInfo object + func (q *QueueInfo) Clone() *QueueInfo { return &QueueInfo{ UID: q.UID, @@ -57,7 +57,7 @@ func (q *QueueInfo) Clone() *QueueInfo { } } -// Reclaimable return whether queue is reclaimable + func (q *QueueInfo) Reclaimable() bool { if q == nil { return false diff --git a/pkg/scheduler/api/resource_info.go b/pkg/scheduler/api/resource_info.go index 0cb98212444..2214e1b7cb4 100644 --- a/pkg/scheduler/api/resource_info.go +++ b/pkg/scheduler/api/resource_info.go @@ -26,7 +26,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/util/assert" ) -// Resource struct defines all the resource type + type Resource struct { MilliCPU float64 Memory float64 @@ -44,12 +44,12 @@ const ( GPUResourceName = "nvidia.com/gpu" ) -// EmptyResource creates a empty resource object and returns + func EmptyResource() *Resource { return &Resource{} } -// Clone is used to clone a resource type + func (r *Resource) Clone() *Resource { clone := &Resource{ MilliCPU: r.MilliCPU, @@ -71,7 +71,7 @@ var minMilliCPU float64 = 10 var minMilliScalarResources float64 = 10 var minMemory float64 = 10 * 1024 * 1024 -// NewResource create a new resource object from resource list + func NewResource(rl v1.ResourceList) *Resource { r := EmptyResource() for rName, rQuant := range rl { @@ -92,7 +92,7 @@ func NewResource(rl 
v1.ResourceList) *Resource { return r } -// IsEmpty returns bool after checking any of resource is less than min possible value + func (r *Resource) IsEmpty() bool { if !(r.MilliCPU < minMilliCPU && r.Memory < minMemory) { return false @@ -107,7 +107,7 @@ func (r *Resource) IsEmpty() bool { return true } -// IsZero checks whether that resource is less than min possible value + func (r *Resource) IsZero(rn v1.ResourceName) bool { switch rn { case v1.ResourceCPU: @@ -126,7 +126,7 @@ func (r *Resource) IsZero(rn v1.ResourceName) bool { } } -// Add is used to add the two resources + func (r *Resource) Add(rr *Resource) *Resource { r.MilliCPU += rr.MilliCPU r.Memory += rr.Memory @@ -212,7 +212,7 @@ func (r *Resource) FitDelta(rr *Resource) *Resource { return r } -// Multi multiples the resource with ratio provided + func (r *Resource) Multi(ratio float64) *Resource { r.MilliCPU = r.MilliCPU * ratio r.Memory = r.Memory * ratio @@ -222,7 +222,7 @@ func (r *Resource) Multi(ratio float64) *Resource { return r } -// Less checks whether a resource is less than other + func (r *Resource) Less(rr *Resource) bool { lessFunc := func(l, r float64) bool { if l < r { @@ -263,7 +263,7 @@ func (r *Resource) Less(rr *Resource) bool { return true } -// LessEqualStrict checks whether a resource is less or equal than other + func (r *Resource) LessEqualStrict(rr *Resource) bool { lessFunc := func(l, r float64) bool { if l <= r { @@ -288,7 +288,7 @@ func (r *Resource) LessEqualStrict(rr *Resource) bool { return true } -// LessEqual checks whether a resource is less than other resource + func (r *Resource) LessEqual(rr *Resource) bool { lessEqualFunc := func(l, r, diff float64) bool { if l < r || math.Abs(l-r) < diff { @@ -325,7 +325,7 @@ func (r *Resource) LessEqual(rr *Resource) bool { return true } -// Diff calculate the difference between two resource + func (r *Resource) Diff(rr *Resource) (*Resource, *Resource) { increasedVal := EmptyResource() decreasedVal := EmptyResource() @@ -360,7 +360,7 @@ func (r *Resource) Diff(rr *Resource) (*Resource, *Resource) { return increasedVal, decreasedVal } -// String returns resource details in string format + func (r *Resource) String() string { str := fmt.Sprintf("cpu %0.2f, memory %0.2f", r.MilliCPU, r.Memory) for rName, rQuant := range r.ScalarResources { @@ -369,7 +369,7 @@ func (r *Resource) String() string { return str } -// Get returns the resource value for that particular resource type + func (r *Resource) Get(rn v1.ResourceName) float64 { switch rn { case v1.ResourceCPU: @@ -384,7 +384,7 @@ func (r *Resource) Get(rn v1.ResourceName) float64 { } } -// ResourceNames returns all resource types + func (r *Resource) ResourceNames() []v1.ResourceName { resNames := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} diff --git a/pkg/scheduler/api/types.go b/pkg/scheduler/api/types.go index 026ce6b2a82..48043a9b749 100644 --- a/pkg/scheduler/api/types.go +++ b/pkg/scheduler/api/types.go @@ -82,7 +82,7 @@ func (ts TaskStatus) String() string { } } -// NodePhase defines the phase of node + type NodePhase int const ( @@ -117,7 +117,7 @@ type CompareFn func(interface{}, interface{}) int // ValidateFn is the func declaration used to check object's status. 
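To keep the Resource helpers above concrete, a short sketch of building resources from a ResourceList and comparing them; the quantities are arbitrary and the milli-CPU comment assumes the usual MilliValue conversion inside NewResource.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"volcano.sh/volcano/pkg/scheduler/api"
)

func main() {
	total := api.NewResource(v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),
		v1.ResourceMemory: resource.MustParse("8Gi"),
	})
	request := api.NewResource(v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("1Gi"),
	})

	// Add mutates the receiver and returns it; Multi scales it in place.
	used := api.EmptyResource().Add(request).Multi(3)

	// LessEqual reports whether `used` still fits inside `total`.
	fmt.Println(used.LessEqual(total))     // true: 1500m CPU / 3Gi vs 4 CPU / 8Gi
	fmt.Println(total.Get(v1.ResourceCPU)) // 4000 (milli-CPU)
}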
type ValidateFn func(interface{}) bool -// ValidateResult is struct to which can used to determine the result + type ValidateResult struct { Pass bool Reason string diff --git a/pkg/scheduler/api/unschedule_info.go b/pkg/scheduler/api/unschedule_info.go index ffdfa78f061..6fcc297ceda 100644 --- a/pkg/scheduler/api/unschedule_info.go +++ b/pkg/scheduler/api/unschedule_info.go @@ -18,25 +18,25 @@ const ( AllNodeUnavailableMsg = "all nodes are unavailable" ) -// FitErrors is set of FitError on many nodes + type FitErrors struct { nodes map[string]*FitError err string } -// NewFitErrors returns an FitErrors + func NewFitErrors() *FitErrors { f := new(FitErrors) f.nodes = make(map[string]*FitError) return f } -// SetError set the common error message in FitErrors + func (f *FitErrors) SetError(err string) { f.err = err } -// SetNodeError set the node error in FitErrors + func (f *FitErrors) SetNodeError(nodeName string, err error) { var fe *FitError switch obj := err.(type) { @@ -53,7 +53,7 @@ func (f *FitErrors) SetNodeError(nodeName string, err error) { f.nodes[nodeName] = fe } -// Error returns the final error message + func (f *FitErrors) Error() string { reasons := make(map[string]int) @@ -78,7 +78,7 @@ func (f *FitErrors) Error() string { return reasonMsg } -// FitError describe the reason why task could not fit that node + type FitError struct { taskNamespace string taskName string @@ -86,7 +86,7 @@ type FitError struct { Reasons []string } -// NewFitError return FitError by message + func NewFitError(task *TaskInfo, node *NodeInfo, message ...string) *FitError { fe := &FitError{ taskName: task.Name, @@ -97,7 +97,7 @@ func NewFitError(task *TaskInfo, node *NodeInfo, message ...string) *FitError { return fe } -// NewFitErrorByReasons return FitError by reasons + func NewFitErrorByReasons(task *TaskInfo, node *NodeInfo, reasons ...predicates.PredicateFailureReason) *FitError { message := make([]string, 0, len(reasons)) for _, reason := range reasons { @@ -106,7 +106,7 @@ func NewFitErrorByReasons(task *TaskInfo, node *NodeInfo, reasons ...predicates. return NewFitError(task, node, message...) } -// Error returns the final error message + func (f *FitError) Error() string { return fmt.Sprintf("task %s/%s on node %s fit failed: %s", f.taskNamespace, f.taskName, f.NodeName, strings.Join(f.Reasons, ", ")) } diff --git a/pkg/scheduler/cache/cache.go b/pkg/scheduler/cache/cache.go index 0cb20b70748..96c1c43d6e0 100644 --- a/pkg/scheduler/cache/cache.go +++ b/pkg/scheduler/cache/cache.go @@ -65,7 +65,7 @@ func New(config *rest.Config, schedulerName string, defaultQueue string) Cache { return newSchedulerCache(config, schedulerName, defaultQueue) } -//SchedulerCache cache for the kube batch +//SchedulerCache cache for the kube batch. type SchedulerCache struct { sync.Mutex @@ -111,7 +111,7 @@ type defaultBinder struct { kubeclient *kubernetes.Clientset } -//Bind will send bind request to api server +// Bind will send bind request to api server. func (db *defaultBinder) Bind(p *v1.Pod, hostname string) error { if err := db.kubeclient.CoreV1().Pods(p.Namespace).Bind(&v1.Binding{ ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID, Annotations: p.Annotations}, @@ -130,7 +130,7 @@ type defaultEvictor struct { kubeclient *kubernetes.Clientset } -//Evict will send delete pod request to api server +//Evict will send delete pod request to api server. 
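A hedged sketch of how the FitError/FitErrors pair above is typically used while evaluating nodes; the task and node values are placeholders, not objects from this diff.

package main

import (
	"fmt"

	"volcano.sh/volcano/pkg/scheduler/api"
)

func main() {
	task := &api.TaskInfo{Namespace: "default", Name: "worker-0"}
	node := &api.NodeInfo{Name: "node-1"}

	fitErrors := api.NewFitErrors()
	// Record a per-node failure; Error() aggregates the reasons afterwards.
	fitErrors.SetNodeError(node.Name, api.NewFitError(task, node, "insufficient cpu"))
	fitErrors.SetError(api.AllNodeUnavailableMsg)

	fmt.Println(fitErrors.Error())
}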
func (de *defaultEvictor) Evict(p *v1.Pod) error { klog.V(3).Infof("Evicting pod %v/%v", p.Namespace, p.Name) @@ -141,13 +141,13 @@ func (de *defaultEvictor) Evict(p *v1.Pod) error { return nil } -// defaultStatusUpdater is the default implementation of the StatusUpdater interface +// defaultStatusUpdater is the default implementation of the StatusUpdater interface. type defaultStatusUpdater struct { kubeclient *kubernetes.Clientset vcclient *vcclient.Clientset } -// following the same logic as podutil.UpdatePodCondition +// following the same logic as podutil.UpdatePodCondition. func podConditionHaveUpdate(status *v1.PodStatus, condition *v1.PodCondition) bool { lastTransitionTime := metav1.Now() // Try to find this pod condition. @@ -172,7 +172,7 @@ func podConditionHaveUpdate(status *v1.PodStatus, condition *v1.PodCondition) bo return !isEqual } -// UpdatePodCondition will Update pod with podCondition +// UpdatePodCondition will Update pod with podCondition. func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.PodCondition) (*v1.Pod, error) { klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) if podutil.UpdatePodCondition(&pod.Status, condition) { @@ -181,7 +181,7 @@ func (su *defaultStatusUpdater) UpdatePodCondition(pod *v1.Pod, condition *v1.Po return pod, nil } -// UpdatePodGroup will Update pod with podCondition +// UpdatePodGroup will Update pod with podCondition. func (su *defaultStatusUpdater) UpdatePodGroup(pg *schedulingapi.PodGroup) (*schedulingapi.PodGroup, error) { podgroup := &vcv1beta1.PodGroup{} if err := schedulingscheme.Scheme.Convert(&pg.PodGroup, podgroup, nil); err != nil { @@ -208,7 +208,7 @@ type defaultVolumeBinder struct { volumeBinder *volumebinder.VolumeBinder } -// AllocateVolumes allocates volume on the host to the task +// AllocateVolumes allocates volume on the host to the task. func (dvb *defaultVolumeBinder) AllocateVolumes(task *schedulingapi.TaskInfo, hostname string) error { allBound, err := dvb.volumeBinder.Binder.AssumePodVolumes(task.Pod, hostname) task.VolumeReady = allBound @@ -216,7 +216,7 @@ func (dvb *defaultVolumeBinder) AllocateVolumes(task *schedulingapi.TaskInfo, ho return err } -// BindVolumes binds volumes to the task +// BindVolumes binds volumes to the task. func (dvb *defaultVolumeBinder) BindVolumes(task *schedulingapi.TaskInfo) error { // If task's volumes are ready, did not bind them again. if task.VolumeReady { @@ -373,7 +373,7 @@ func newSchedulerCache(config *rest.Config, schedulerName string, defaultQueue s return sc } -// Run starts the schedulerCache +// Run starts the schedulerCache. func (sc *SchedulerCache) Run(stopCh <-chan struct{}) { go sc.podInformer.Informer().Run(stopCh) go sc.nodeInformer.Informer().Run(stopCh) @@ -395,7 +395,7 @@ func (sc *SchedulerCache) Run(stopCh <-chan struct{}) { go wait.Until(sc.processCleanupJob, 0, stopCh) } -// WaitForCacheSync sync the cache with the api server +// WaitForCacheSync sync the cache with the api server. func (sc *SchedulerCache) WaitForCacheSync(stopCh <-chan struct{}) bool { return cache.WaitForCacheSync(stopCh, @@ -418,7 +418,7 @@ func (sc *SchedulerCache) WaitForCacheSync(stopCh <-chan struct{}) bool { ) } -// findJobAndTask returns job and the task info +// findJobAndTask returns job and the task info. 
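The Run/WaitForCacheSync pair above follows the usual informer start-up pattern; a minimal sketch of how a caller might drive the cache, assuming Run, WaitForCacheSync, and Snapshot are all exposed by the Cache interface returned from New (the scheduler name and default queue are placeholders).

package example

import (
	"k8s.io/client-go/rest"
	"k8s.io/klog"

	schedcache "volcano.sh/volcano/pkg/scheduler/cache"
)

func startCache(config *rest.Config) {
	c := schedcache.New(config, "volcano", "default")

	stopCh := make(chan struct{})
	defer close(stopCh)

	go c.Run(stopCh)
	if !c.WaitForCacheSync(stopCh) {
		klog.Fatal("failed to sync scheduler cache")
	}

	// A snapshot is what every scheduling session works against.
	snapshot := c.Snapshot()
	klog.V(3).Infof("snapshot has %d jobs and %d nodes", len(snapshot.Jobs), len(snapshot.Nodes))
}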
func (sc *SchedulerCache) findJobAndTask(taskInfo *schedulingapi.TaskInfo) (*schedulingapi.JobInfo, *schedulingapi.TaskInfo, error) { job, found := sc.Jobs[taskInfo.Job] if !found { @@ -556,17 +556,17 @@ func (sc *SchedulerCache) Bind(taskInfo *schedulingapi.TaskInfo, hostname string return nil } -// AllocateVolumes allocates volume on the host to the task + func (sc *SchedulerCache) AllocateVolumes(task *schedulingapi.TaskInfo, hostname string) error { return sc.VolumeBinder.AllocateVolumes(task, hostname) } -// BindVolumes binds volumes to the task + func (sc *SchedulerCache) BindVolumes(task *schedulingapi.TaskInfo) error { return sc.VolumeBinder.BindVolumes(task) } -// taskUnschedulable updates pod status of pending task + func (sc *SchedulerCache) taskUnschedulable(task *schedulingapi.TaskInfo, message string) error { pod := task.Pod @@ -650,7 +650,7 @@ func (sc *SchedulerCache) processResyncTask() { } } -// Snapshot returns the complete snapshot of the cluster from cache + func (sc *SchedulerCache) Snapshot() *schedulingapi.ClusterInfo { sc.Mutex.Lock() defer sc.Mutex.Unlock() @@ -731,7 +731,7 @@ func (sc *SchedulerCache) Snapshot() *schedulingapi.ClusterInfo { return snapshot } -// String returns information about the cache in a string format + func (sc *SchedulerCache) String() string { sc.Mutex.Lock() defer sc.Mutex.Unlock() diff --git a/pkg/scheduler/cache/event_handlers.go b/pkg/scheduler/cache/event_handlers.go index cde7cb620c0..c79946292b6 100644 --- a/pkg/scheduler/cache/event_handlers.go +++ b/pkg/scheduler/cache/event_handlers.go @@ -170,7 +170,7 @@ func (sc *SchedulerCache) deletePod(pod *v1.Pod) error { return nil } -// AddPod add pod to scheduler cache + func (sc *SchedulerCache) AddPod(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { @@ -191,7 +191,7 @@ func (sc *SchedulerCache) AddPod(obj interface{}) { return } -// UpdatePod update pod to scheduler cache + func (sc *SchedulerCache) UpdatePod(oldObj, newObj interface{}) { oldPod, ok := oldObj.(*v1.Pod) if !ok { @@ -218,7 +218,7 @@ func (sc *SchedulerCache) UpdatePod(oldObj, newObj interface{}) { return } -// DeletePod delete pod from scheduler cache + func (sc *SchedulerCache) DeletePod(obj interface{}) { var pod *v1.Pod switch t := obj.(type) { @@ -279,7 +279,7 @@ func (sc *SchedulerCache) deleteNode(node *v1.Node) error { return nil } -// AddNode add node to scheduler cache + func (sc *SchedulerCache) AddNode(obj interface{}) { node, ok := obj.(*v1.Node) if !ok { @@ -298,7 +298,7 @@ func (sc *SchedulerCache) AddNode(obj interface{}) { return } -// UpdateNode update node to scheduler cache + func (sc *SchedulerCache) UpdateNode(oldObj, newObj interface{}) { oldNode, ok := oldObj.(*v1.Node) if !ok { @@ -322,7 +322,7 @@ func (sc *SchedulerCache) UpdateNode(oldObj, newObj interface{}) { return } -// DeleteNode delete node from scheduler cache + func (sc *SchedulerCache) DeleteNode(obj interface{}) { var node *v1.Node switch t := obj.(type) { @@ -392,7 +392,7 @@ func (sc *SchedulerCache) deletePodGroup(id schedulingapi.JobID) error { return nil } -// AddPodGroupV1beta1 add podgroup to scheduler cache + func (sc *SchedulerCache) AddPodGroupV1beta1(obj interface{}) { ss, ok := obj.(*schedulingv1.PodGroup) if !ok { @@ -420,7 +420,7 @@ func (sc *SchedulerCache) AddPodGroupV1beta1(obj interface{}) { return } -// UpdatePodGroupV1beta1 add podgroup to scheduler cache + func (sc *SchedulerCache) UpdatePodGroupV1beta1(oldObj, newObj interface{}) { oldSS, ok := oldObj.(*schedulingv1.PodGroup) if !ok { @@ -455,7 +455,7 @@ func 
(sc *SchedulerCache) UpdatePodGroupV1beta1(oldObj, newObj interface{}) { return } -// DeletePodGroupV1beta1 delete podgroup from scheduler cache + func (sc *SchedulerCache) DeletePodGroupV1beta1(obj interface{}) { var ss *schedulingv1.PodGroup switch t := obj.(type) { @@ -485,7 +485,7 @@ func (sc *SchedulerCache) DeletePodGroupV1beta1(obj interface{}) { return } -// AddQueueV1beta1 add queue to scheduler cache + func (sc *SchedulerCache) AddQueueV1beta1(obj interface{}) { ss, ok := obj.(*schedulingv1.Queue) if !ok { @@ -508,7 +508,7 @@ func (sc *SchedulerCache) AddQueueV1beta1(obj interface{}) { return } -// UpdateQueueV1beta1 update queue to scheduler cache + func (sc *SchedulerCache) UpdateQueueV1beta1(oldObj, newObj interface{}) { oldSS, ok := oldObj.(*schedulingv1.Queue) if !ok { @@ -538,7 +538,7 @@ func (sc *SchedulerCache) UpdateQueueV1beta1(oldObj, newObj interface{}) { return } -// DeleteQueueV1beta1 delete queue from the scheduler cache + func (sc *SchedulerCache) DeleteQueueV1beta1(obj interface{}) { var ss *schedulingv1.Queue switch t := obj.(type) { @@ -576,7 +576,7 @@ func (sc *SchedulerCache) deleteQueue(id schedulingapi.QueueID) { delete(sc.Queues, id) } -//DeletePriorityClass delete priorityclass from the scheduler cache + func (sc *SchedulerCache) DeletePriorityClass(obj interface{}) { var ss *v1beta1.PriorityClass switch t := obj.(type) { @@ -600,7 +600,7 @@ func (sc *SchedulerCache) DeletePriorityClass(obj interface{}) { sc.deletePriorityClass(ss) } -//UpdatePriorityClass update priorityclass to scheduler cache + func (sc *SchedulerCache) UpdatePriorityClass(oldObj, newObj interface{}) { oldSS, ok := oldObj.(*v1beta1.PriorityClass) if !ok { @@ -625,7 +625,7 @@ func (sc *SchedulerCache) UpdatePriorityClass(oldObj, newObj interface{}) { sc.addPriorityClass(newSS) } -//AddPriorityClass add priorityclass to scheduler cache + func (sc *SchedulerCache) AddPriorityClass(obj interface{}) { var ss *v1beta1.PriorityClass switch t := obj.(type) { @@ -692,7 +692,7 @@ func (sc *SchedulerCache) deleteResourceQuota(quota *v1.ResourceQuota) { collection.Delete(quota) } -// DeleteResourceQuota delete ResourceQuota from the scheduler cache + func (sc *SchedulerCache) DeleteResourceQuota(obj interface{}) { var r *v1.ResourceQuota switch t := obj.(type) { @@ -717,7 +717,7 @@ func (sc *SchedulerCache) DeleteResourceQuota(obj interface{}) { sc.deleteResourceQuota(r) } -// UpdateResourceQuota update ResourceQuota to scheduler cache + func (sc *SchedulerCache) UpdateResourceQuota(oldObj, newObj interface{}) { newR, ok := newObj.(*v1.ResourceQuota) if !ok { @@ -732,7 +732,7 @@ func (sc *SchedulerCache) UpdateResourceQuota(oldObj, newObj interface{}) { sc.updateResourceQuota(newR) } -// AddResourceQuota add ResourceQuota to scheduler cache + func (sc *SchedulerCache) AddResourceQuota(obj interface{}) { var r *v1.ResourceQuota switch t := obj.(type) { diff --git a/pkg/scheduler/cache/interface.go b/pkg/scheduler/cache/interface.go index 7d74dfd67ae..5dcb5624f87 100644 --- a/pkg/scheduler/cache/interface.go +++ b/pkg/scheduler/cache/interface.go @@ -23,7 +23,7 @@ import ( ) // Cache collects pods/nodes/queues information -// and provides information snapshot + type Cache interface { // Run start informer Run(stopCh <-chan struct{}) @@ -55,23 +55,23 @@ type Cache interface { BindVolumes(task *api.TaskInfo) error } -// VolumeBinder interface for allocate and bind volumes + type VolumeBinder interface { AllocateVolumes(task *api.TaskInfo, hostname string) error BindVolumes(task *api.TaskInfo) error } 
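The Add/Update/Delete methods above are plain informer callbacks; a hedged sketch of the kind of wiring newSchedulerCache performs internally (informer construction is simplified and the resync period of 0 is arbitrary).

package example

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"

	schedcache "volcano.sh/volcano/pkg/scheduler/cache"
)

func wireHandlers(client kubernetes.Interface, sc *schedcache.SchedulerCache) {
	factory := informers.NewSharedInformerFactory(client, 0)

	// Pods and nodes feed the scheduler cache through the handlers in this file.
	factory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    sc.AddPod,
		UpdateFunc: sc.UpdatePod,
		DeleteFunc: sc.DeletePod,
	})
	factory.Core().V1().Nodes().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    sc.AddNode,
		UpdateFunc: sc.UpdateNode,
		DeleteFunc: sc.DeleteNode,
	})
}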
-//Binder interface for binding task and hostname +//Binder interface for binding task and hostname. type Binder interface { Bind(task *v1.Pod, hostname string) error } -// Evictor interface for evict pods +// Evictor interface for evict pods. type Evictor interface { Evict(pod *v1.Pod) error } -// StatusUpdater updates pod with given PodCondition +// StatusUpdater updates pod with given PodCondition. type StatusUpdater interface { UpdatePodCondition(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error) UpdatePodGroup(pg *api.PodGroup) (*api.PodGroup, error) diff --git a/pkg/scheduler/conf/scheduler_conf.go b/pkg/scheduler/conf/scheduler_conf.go index b292c74094b..a812f8cf4b3 100644 --- a/pkg/scheduler/conf/scheduler_conf.go +++ b/pkg/scheduler/conf/scheduler_conf.go @@ -26,12 +26,12 @@ type SchedulerConfiguration struct { Configurations []Configuration `yaml:"configurations"` } -// Tier defines plugin tier +// Tier defines plugin tier. type Tier struct { Plugins []PluginOption `yaml:"plugins"` } -// Configuration is configuration of action +// Configuration is configuration of action. type Configuration struct { // Name is name of action Name string `yaml:"name"` @@ -39,7 +39,7 @@ type Configuration struct { Arguments map[string]string `yaml:"arguments"` } -// PluginOption defines the options of plugin +// PluginOption defines the options of plugin. type PluginOption struct { // The name of Plugin Name string `yaml:"name"` diff --git a/pkg/scheduler/framework/arguments.go b/pkg/scheduler/framework/arguments.go index be31cc78058..8589e6b48d3 100644 --- a/pkg/scheduler/framework/arguments.go +++ b/pkg/scheduler/framework/arguments.go @@ -24,10 +24,10 @@ import ( "k8s.io/klog" ) -// Arguments map +// Arguments map. type Arguments map[string]string -// GetInt get the integer value from string +// GetInt get the integer value from string. func (a Arguments) GetInt(ptr *int, key string) { if ptr == nil { return @@ -47,7 +47,7 @@ func (a Arguments) GetInt(ptr *int, key string) { *ptr = value } -// GetFloat64 get the float64 value from string +// GetFloat64 get the float64 value from string. func (a Arguments) GetFloat64(ptr *float64, key string) { if ptr == nil { return @@ -67,7 +67,7 @@ func (a Arguments) GetFloat64(ptr *float64, key string) { *ptr = value } -// GetBool get the bool value from string +// GetBool get the bool value from string. func (a Arguments) GetBool(ptr *bool, key string) { if ptr == nil { return @@ -87,7 +87,7 @@ func (a Arguments) GetBool(ptr *bool, key string) { *ptr = value } -// GetArgOfActionFromConf return argument of action reading from configuration of schedule +// GetArgOfActionFromConf return argument of action reading from configuration of schedule. func GetArgOfActionFromConf(configurations []conf.Configuration, actionName string) Arguments { for _, c := range configurations { if c.Name == actionName { diff --git a/pkg/scheduler/framework/event.go b/pkg/scheduler/framework/event.go index d21f5e9fdb1..9f4123185d9 100644 --- a/pkg/scheduler/framework/event.go +++ b/pkg/scheduler/framework/event.go @@ -20,12 +20,12 @@ import ( "volcano.sh/volcano/pkg/scheduler/api" ) -// Event structure +// Event structure. type Event struct { Task *api.TaskInfo } -// EventHandler structure +// EventHandler structure. 
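A tiny sketch of how a plugin consumes the Arguments helpers above; the keys and defaults are made up for the example.

package example

import "volcano.sh/volcano/pkg/scheduler/framework"

func readArgs(args framework.Arguments) (weight int, enabled bool) {
	// Defaults survive unless the key is present and parses cleanly.
	weight = 1
	enabled = true

	args.GetInt(&weight, "example.weight")
	args.GetBool(&enabled, "example.enabled")
	return weight, enabled
}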
type EventHandler struct { AllocateFunc func(event *Event) DeallocateFunc func(event *Event) diff --git a/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go index f66275769e4..741acb02e1e 100644 --- a/pkg/scheduler/framework/framework.go +++ b/pkg/scheduler/framework/framework.go @@ -26,7 +26,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/metrics" ) -// OpenSession start the session +// OpenSession start the session. func OpenSession(cache cache.Cache, tiers []conf.Tier, configurations []conf.Configuration) *Session { ssn := openSession(cache) ssn.Tiers = tiers @@ -52,7 +52,7 @@ func OpenSession(cache cache.Cache, tiers []conf.Tier, configurations []conf.Con return ssn } -// CloseSession close the session +// CloseSession close the session. func CloseSession(ssn *Session) { for _, plugin := range ssn.plugins { onSessionCloseStart := time.Now() diff --git a/pkg/scheduler/framework/interface.go b/pkg/scheduler/framework/interface.go index 78878878f81..bd79fe66181 100644 --- a/pkg/scheduler/framework/interface.go +++ b/pkg/scheduler/framework/interface.go @@ -31,7 +31,7 @@ type Action interface { UnInitialize() } -// Plugin is the interface of scheduler plugin +// Plugin is the interface of scheduler plugin. type Plugin interface { // The unique name of Plugin. Name() string diff --git a/pkg/scheduler/framework/job_updater.go b/pkg/scheduler/framework/job_updater.go index 98454fbc39a..fc126aaf7e7 100644 --- a/pkg/scheduler/framework/job_updater.go +++ b/pkg/scheduler/framework/job_updater.go @@ -20,7 +20,7 @@ const ( jobConditionUpdateTimeJitter = 30 * time.Second ) -// TimeJitterAfter means: new after old + duration + jitter +// TimeJitterAfter means: new after old + duration + jitter. func TimeJitterAfter(new, old time.Time, duration, maxJitter time.Duration) bool { var jitter int64 if maxJitter > 0 { @@ -98,7 +98,7 @@ func isPodGroupStatusUpdated(newStatus, oldStatus *scheduling.PodGroupStatus) bo return shouldUpdate } -// updateJob update specified job +// updateJob update specified job. func (ju *jobUpdater) updateJob(index int) { job := ju.jobQueue[index] ssn := ju.ssn diff --git a/pkg/scheduler/framework/plugins.go b/pkg/scheduler/framework/plugins.go index 7d5c736fc07..8d09824a85c 100644 --- a/pkg/scheduler/framework/plugins.go +++ b/pkg/scheduler/framework/plugins.go @@ -20,13 +20,13 @@ import "sync" var pluginMutex sync.Mutex -// PluginBuilder plugin management +// PluginBuilder plugin management. type PluginBuilder func(Arguments) Plugin -// Plugin management +// Plugin management. var pluginBuilders = map[string]PluginBuilder{} -// RegisterPluginBuilder register the plugin +// RegisterPluginBuilder register the plugin. func RegisterPluginBuilder(name string, pc PluginBuilder) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -34,7 +34,7 @@ func RegisterPluginBuilder(name string, pc PluginBuilder) { pluginBuilders[name] = pc } -// CleanupPluginBuilders cleans up all the plugin +// CleanupPluginBuilders cleans up all the plugin. func CleanupPluginBuilders() { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -42,7 +42,7 @@ func CleanupPluginBuilders() { pluginBuilders = map[string]PluginBuilder{} } -// GetPluginBuilder get the pluginbuilder by name +// GetPluginBuilder get the pluginbuilder by name. func GetPluginBuilder(name string) (PluginBuilder, bool) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -51,10 +51,10 @@ func GetPluginBuilder(name string) (PluginBuilder, bool) { return pb, found } -// Action management +// Action management. var actionMap = map[string]Action{} -// RegisterAction register action +// RegisterAction register action. func RegisterAction(act Action) { pluginMutex.Lock() defer pluginMutex.Unlock() @@ -62,7 +62,7 @@ func RegisterAction(act Action) { actionMap[act.Name()] = act } -// GetAction get the action by name +// GetAction get the action by name. func GetAction(name string) (Action, bool) {
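Pulling the framework pieces above together, a hedged sketch of a minimal plugin: it satisfies the Plugin interface, registers two session callbacks, and is wired in through RegisterPluginBuilder. The plugin name, the ordering rule, and the always-ready check are invented for illustration.

package example

import (
	"volcano.sh/volcano/pkg/scheduler/api"
	"volcano.sh/volcano/pkg/scheduler/framework"
)

type demoPlugin struct {
	arguments framework.Arguments
}

// New builds the plugin; the framework calls it through the registered builder.
func New(arguments framework.Arguments) framework.Plugin {
	return &demoPlugin{arguments: arguments}
}

func (dp *demoPlugin) Name() string { return "demo" }

func (dp *demoPlugin) OnSessionOpen(ssn *framework.Session) {
	// Order jobs by how many tasks they carry (illustrative only).
	ssn.AddJobOrderFn(dp.Name(), func(l, r interface{}) int {
		lv, rv := l.(*api.JobInfo), r.(*api.JobInfo)
		switch {
		case len(lv.Tasks) < len(rv.Tasks):
			return -1
		case len(lv.Tasks) > len(rv.Tasks):
			return 1
		default:
			return 0
		}
	})

	// Treat every job as ready; a real plugin applies its own rule here.
	ssn.AddJobReadyFn(dp.Name(), func(obj interface{}) bool { return true })
}

func (dp *demoPlugin) OnSessionClose(ssn *framework.Session) {}

func init() {
	framework.RegisterPluginBuilder("demo", New)
}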
pluginMutex.Lock() defer pluginMutex.Unlock() diff --git a/pkg/scheduler/framework/session.go b/pkg/scheduler/framework/session.go index 50a5bfc3bd3..885e17d307e 100644 --- a/pkg/scheduler/framework/session.go +++ b/pkg/scheduler/framework/session.go @@ -32,7 +32,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/metrics" ) -// Session information for the current session + type Session struct { UID types.UID @@ -196,14 +196,14 @@ func jobStatus(ssn *Session, jobInfo *api.JobInfo) scheduling.PodGroupStatus { return status } -// Statement returns new statement object + func (ssn *Session) Statement() *Statement { return &Statement{ ssn: ssn, } } -// Pipeline the task to the node in the session + func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { // Only update status in session job, found := ssn.Jobs[task.Job] @@ -246,7 +246,7 @@ func (ssn *Session) Pipeline(task *api.TaskInfo, hostname string) error { return nil } -//Allocate the task to the node in the session + func (ssn *Session) Allocate(task *api.TaskInfo, hostname string) error { if err := ssn.cache.AllocateVolumes(task, hostname); err != nil { return err @@ -330,7 +330,7 @@ func (ssn *Session) dispatch(task *api.TaskInfo) error { return nil } -//Evict the task in the session + func (ssn *Session) Evict(reclaimee *api.TaskInfo, reason string) error { if err := ssn.cache.Evict(reclaimee, reason); err != nil { return err @@ -395,12 +395,12 @@ func (ssn *Session) UpdateJobCondition(jobInfo *api.JobInfo, cond *scheduling.Po return nil } -// AddEventHandler add event handlers + func (ssn *Session) AddEventHandler(eh *EventHandler) { ssn.eventHandlers = append(ssn.eventHandlers, eh) } -//String return nodes and jobs information in the session + func (ssn Session) String() string { msg := fmt.Sprintf("Session %v: \n", ssn.UID) diff --git a/pkg/scheduler/framework/session_plugins.go b/pkg/scheduler/framework/session_plugins.go index 120aebe7d38..dc4b1859dd1 100644 --- a/pkg/scheduler/framework/session_plugins.go +++ b/pkg/scheduler/framework/session_plugins.go @@ -22,92 +22,92 @@ import ( "volcano.sh/volcano/pkg/scheduler/api" ) -// AddJobOrderFn add job order function + func (ssn *Session) AddJobOrderFn(name string, cf api.CompareFn) { ssn.jobOrderFns[name] = cf } -// AddQueueOrderFn add queue order function + func (ssn *Session) AddQueueOrderFn(name string, qf api.CompareFn) { ssn.queueOrderFns[name] = qf } -// AddTaskOrderFn add task order function + func (ssn *Session) AddTaskOrderFn(name string, cf api.CompareFn) { ssn.taskOrderFns[name] = cf } -// AddNamespaceOrderFn add namespace order function + func (ssn *Session) AddNamespaceOrderFn(name string, cf api.CompareFn) { ssn.namespaceOrderFns[name] = cf } -// AddPreemptableFn add preemptable function + func (ssn *Session) AddPreemptableFn(name string, cf api.EvictableFn) { ssn.preemptableFns[name] = cf } -// AddReclaimableFn add Reclaimable function + func (ssn *Session) AddReclaimableFn(name string, rf api.EvictableFn) { ssn.reclaimableFns[name] = rf } -// AddJobReadyFn add JobReady function + func (ssn *Session) AddJobReadyFn(name string, vf api.ValidateFn) { ssn.jobReadyFns[name] = vf } -// AddJobPipelinedFn add pipelined function + func (ssn *Session) AddJobPipelinedFn(name string, vf api.ValidateFn) { ssn.jobPipelinedFns[name] = vf } -// AddPredicateFn add Predicate function + func (ssn *Session) AddPredicateFn(name string, pf api.PredicateFn) { ssn.predicateFns[name] = pf } -// AddBestNodeFn add BestNode function + func (ssn *Session) AddBestNodeFn(name string, 
pf api.BestNodeFn) { ssn.bestNodeFns[name] = pf } -// AddNodeOrderFn add Node order function + func (ssn *Session) AddNodeOrderFn(name string, pf api.NodeOrderFn) { ssn.nodeOrderFns[name] = pf } -// AddBatchNodeOrderFn add Batch Node order function + func (ssn *Session) AddBatchNodeOrderFn(name string, pf api.BatchNodeOrderFn) { ssn.batchNodeOrderFns[name] = pf } -// AddNodeMapFn add Node map function + func (ssn *Session) AddNodeMapFn(name string, pf api.NodeMapFn) { ssn.nodeMapFns[name] = pf } -// AddNodeReduceFn add Node reduce function + func (ssn *Session) AddNodeReduceFn(name string, pf api.NodeReduceFn) { ssn.nodeReduceFns[name] = pf } -// AddOverusedFn add overused function + func (ssn *Session) AddOverusedFn(name string, fn api.ValidateFn) { ssn.overusedFns[name] = fn } -// AddJobValidFn add jobvalid function + func (ssn *Session) AddJobValidFn(name string, fn api.ValidateExFn) { ssn.jobValidFns[name] = fn } -// AddJobEnqueueableFn add jobenqueueable function + func (ssn *Session) AddJobEnqueueableFn(name string, fn api.ValidateFn) { ssn.jobEnqueueableFns[name] = fn } -// Reclaimable invoke reclaimable function of the plugins + func (ssn *Session) Reclaimable(reclaimer *api.TaskInfo, reclaimees []*api.TaskInfo) []*api.TaskInfo { var victims []*api.TaskInfo var init bool @@ -149,7 +149,7 @@ func (ssn *Session) Reclaimable(reclaimer *api.TaskInfo, reclaimees []*api.TaskI return victims } -// Preemptable invoke preemptable function of the plugins + func (ssn *Session) Preemptable(preemptor *api.TaskInfo, preemptees []*api.TaskInfo) []*api.TaskInfo { var victims []*api.TaskInfo var init bool @@ -192,7 +192,7 @@ func (ssn *Session) Preemptable(preemptor *api.TaskInfo, preemptees []*api.TaskI return victims } -// Overused invoke overused function of the plugins + func (ssn *Session) Overused(queue *api.QueueInfo) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -209,7 +209,7 @@ func (ssn *Session) Overused(queue *api.QueueInfo) bool { return false } -// JobReady invoke jobready function of the plugins + func (ssn *Session) JobReady(obj interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -230,7 +230,7 @@ func (ssn *Session) JobReady(obj interface{}) bool { return true } -// JobPipelined invoke pipelined function of the plugins + func (ssn *Session) JobPipelined(obj interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -251,7 +251,7 @@ func (ssn *Session) JobPipelined(obj interface{}) bool { return true } -// JobValid invoke jobvalid function of the plugins + func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -270,7 +270,7 @@ func (ssn *Session) JobValid(obj interface{}) *api.ValidateResult { return nil } -// JobEnqueueable invoke jobEnqueueableFns function of the plugins + func (ssn *Session) JobEnqueueable(obj interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -288,7 +288,7 @@ func (ssn *Session) JobEnqueueable(obj interface{}) bool { return true } -// JobOrderFn invoke joborder function of the plugins + func (ssn *Session) JobOrderFn(l, r interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -315,7 +315,7 @@ func (ssn *Session) JobOrderFn(l, r interface{}) bool { } -// NamespaceOrderFn invoke namespaceorder function of the plugins + func (ssn *Session) NamespaceOrderFn(l, r interface{}) bool { for _, 
tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -340,7 +340,7 @@ func (ssn *Session) NamespaceOrderFn(l, r interface{}) bool { return lv < rv } -// QueueOrderFn invoke queueorder function of the plugins + func (ssn *Session) QueueOrderFn(l, r interface{}) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -368,7 +368,7 @@ func (ssn *Session) QueueOrderFn(l, r interface{}) bool { } -// TaskCompareFns invoke taskorder function of the plugins + func (ssn *Session) TaskCompareFns(l, r interface{}) int { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -388,7 +388,7 @@ func (ssn *Session) TaskCompareFns(l, r interface{}) int { return 0 } -// TaskOrderFn invoke taskorder function of the plugins + func (ssn *Session) TaskOrderFn(l, r interface{}) bool { if res := ssn.TaskCompareFns(l, r); res != 0 { return res < 0 @@ -404,7 +404,7 @@ func (ssn *Session) TaskOrderFn(l, r interface{}) bool { } -// PredicateFn invoke predicate function of the plugins + func (ssn *Session) PredicateFn(task *api.TaskInfo, node *api.NodeInfo) error { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -424,7 +424,7 @@ func (ssn *Session) PredicateFn(task *api.TaskInfo, node *api.NodeInfo) error { return nil } -// BestNodeFn invoke bestNode function of the plugins + func (ssn *Session) BestNodeFn(task *api.TaskInfo, nodeScores map[float64][]*api.NodeInfo) *api.NodeInfo { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { @@ -444,7 +444,7 @@ func (ssn *Session) BestNodeFn(task *api.TaskInfo, nodeScores map[float64][]*api return nil } -// NodeOrderFn invoke node order function of the plugins + func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (float64, error) { priorityScore := 0.0 for _, tier := range ssn.Tiers { @@ -467,7 +467,7 @@ func (ssn *Session) NodeOrderFn(task *api.TaskInfo, node *api.NodeInfo) (float64 return priorityScore, nil } -// BatchNodeOrderFn invoke node order function of the plugins + func (ssn *Session) BatchNodeOrderFn(task *api.TaskInfo, nodes []*api.NodeInfo) (map[string]float64, error) { priorityScore := make(map[string]float64, len(nodes)) for _, tier := range ssn.Tiers { @@ -495,7 +495,7 @@ func isEnabled(enabled *bool) bool { return enabled != nil && *enabled } -// NodeOrderMapFn invoke node order function of the plugins + func (ssn *Session) NodeOrderMapFn(task *api.TaskInfo, node *api.NodeInfo) (map[string]float64, float64, error) { nodeScoreMap := map[string]float64{} var priorityScore float64 @@ -524,7 +524,7 @@ func (ssn *Session) NodeOrderMapFn(task *api.TaskInfo, node *api.NodeInfo) (map[ return nodeScoreMap, priorityScore, nil } -// NodeOrderReduceFn invoke node order function of the plugins + func (ssn *Session) NodeOrderReduceFn(task *api.TaskInfo, pluginNodeScoreMap map[string]schedulerapi.HostPriorityList) (map[string]float64, error) { nodeScoreMap := map[string]float64{} for _, tier := range ssn.Tiers { diff --git a/pkg/scheduler/framework/statement.go b/pkg/scheduler/framework/statement.go index fdacf38ff7e..b079a55ecde 100644 --- a/pkg/scheduler/framework/statement.go +++ b/pkg/scheduler/framework/statement.go @@ -25,7 +25,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/metrics" ) -// Statement structure + type Statement struct { operations []operation ssn *Session @@ -36,7 +36,7 @@ type operation struct { args []interface{} } -//Evict the pod + func (s *Statement) Evict(reclaimee *api.TaskInfo, reason string) error { // Update status in 
session job, found := s.ssn.Jobs[reclaimee.Job] @@ -112,7 +112,7 @@ func (s *Statement) unevict(reclaimee *api.TaskInfo, reason string) error { return nil } -// Pipeline the task for the node + func (s *Statement) Pipeline(task *api.TaskInfo, hostname string) error { job, found := s.ssn.Jobs[task.Job] if found { @@ -196,7 +196,7 @@ func (s *Statement) unpipeline(task *api.TaskInfo) error { return nil } -// Allocate the task to node + func (s *Statement) Allocate(task *api.TaskInfo, hostname string) error { if err := s.ssn.cache.AllocateVolumes(task, hostname); err != nil { return err @@ -277,7 +277,7 @@ func (s *Statement) allocate(task *api.TaskInfo, hostname string) error { return nil } -// unallocate the pod for task + func (s *Statement) unallocate(task *api.TaskInfo, reason string) error { // Update status in session job, found := s.ssn.Jobs[task.Job] @@ -307,7 +307,7 @@ func (s *Statement) unallocate(task *api.TaskInfo, reason string) error { return nil } -// Discard operation for evict, pipeline and allocate + func (s *Statement) Discard() { klog.V(3).Info("Discarding operations ...") for i := len(s.operations) - 1; i >= 0; i-- { @@ -323,7 +323,7 @@ func (s *Statement) Discard() { } } -// Commit operation for evict and pipeline + func (s *Statement) Commit() { klog.V(3).Info("Committing operations ...") for _, op := range s.operations { diff --git a/pkg/scheduler/metrics/job.go b/pkg/scheduler/metrics/job.go index 7a16ab6d7d3..61ce8839992 100644 --- a/pkg/scheduler/metrics/job.go +++ b/pkg/scheduler/metrics/job.go @@ -39,7 +39,7 @@ var ( ) ) -// UpdateJobShare records share for one job +// UpdateJobShare records share for one job. func UpdateJobShare(jobNs, jobID string, share float64) { jobShare.WithLabelValues(jobNs, jobID).Set(share) } diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index a706a9b3c0c..554d8c634bc 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -112,47 +112,47 @@ var ( ) ) -// UpdatePluginDuration updates latency for every plugin +// UpdatePluginDuration updates latency for every plugin. func UpdatePluginDuration(pluginName, OnSessionStatus string, duration time.Duration) { pluginSchedulingLatency.WithLabelValues(pluginName, OnSessionStatus).Observe(DurationInMicroseconds(duration)) } -// UpdateActionDuration updates latency for every action +// UpdateActionDuration updates latency for every action. func UpdateActionDuration(actionName string, duration time.Duration) { actionSchedulingLatency.WithLabelValues(actionName).Observe(DurationInMicroseconds(duration)) } -// UpdateE2eDuration updates entire end to end scheduling latency +// UpdateE2eDuration updates entire end to end scheduling latency. func UpdateE2eDuration(duration time.Duration) { e2eSchedulingLatency.Observe(DurationInMilliseconds(duration)) } -// UpdateTaskScheduleDuration updates single task scheduling latency +// UpdateTaskScheduleDuration updates single task scheduling latency. func UpdateTaskScheduleDuration(duration time.Duration) { taskSchedulingLatency.Observe(DurationInMicroseconds(duration)) } -// UpdatePodScheduleStatus update pod schedule decision, could be Success, Failure, Error +// UpdatePodScheduleStatus update pod schedule decision, could be Success, Failure, Error. 
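The Statement type above gives actions a small transaction over Evict/Pipeline/Allocate; a hedged sketch of the commit-or-discard pattern (the surrounding decision logic is a stand-in, not the allocate action itself).

package example

import (
	"k8s.io/klog"

	"volcano.sh/volcano/pkg/scheduler/api"
	"volcano.sh/volcano/pkg/scheduler/framework"
)

func tryPlace(ssn *framework.Session, task *api.TaskInfo, node *api.NodeInfo) {
	stmt := ssn.Statement()

	if err := stmt.Allocate(task, node.Name); err != nil {
		klog.Errorf("allocate %s/%s on %s failed: %v", task.Namespace, task.Name, node.Name, err)
		// Roll back whatever the statement has already done.
		stmt.Discard()
		return
	}

	// Keep the recorded operations once the placement is acceptable.
	stmt.Commit()
}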
func UpdatePodScheduleStatus(label string, count int) { scheduleAttempts.WithLabelValues(label).Add(float64(count)) } -// UpdatePreemptionVictimsCount updates count of preemption victims +// UpdatePreemptionVictimsCount updates count of preemption victims. func UpdatePreemptionVictimsCount(victimsCount int) { preemptionVictims.Set(float64(victimsCount)) } -// RegisterPreemptionAttempts records number of attempts for preemtion +// RegisterPreemptionAttempts records number of attempts for preemtion. func RegisterPreemptionAttempts() { preemptionAttempts.Inc() } -// UpdateUnscheduleTaskCount records total number of unscheduleable tasks +// UpdateUnscheduleTaskCount records total number of unscheduleable tasks. func UpdateUnscheduleTaskCount(jobID string, taskCount int) { unscheduleTaskCount.WithLabelValues(jobID).Set(float64(taskCount)) } -// UpdateUnscheduleJobCount records total number of unscheduleable jobs +// UpdateUnscheduleJobCount records total number of unscheduleable jobs. func UpdateUnscheduleJobCount(jobCount int) { unscheduleJobCount.Set(float64(jobCount)) } @@ -172,7 +172,7 @@ func DurationInSeconds(duration time.Duration) float64 { return duration.Seconds() } -// Duration get the time since specified start +// Duration get the time since specified start. func Duration(start time.Time) time.Duration { return time.Since(start) } diff --git a/pkg/scheduler/metrics/namespace.go b/pkg/scheduler/metrics/namespace.go index 746e377ec33..e07ae51441b 100644 --- a/pkg/scheduler/metrics/namespace.go +++ b/pkg/scheduler/metrics/namespace.go @@ -47,17 +47,17 @@ var ( ) ) -// UpdateNamespaceShare records share for one namespace +// UpdateNamespaceShare records share for one namespace. func UpdateNamespaceShare(namespaceName string, share float64) { namespaceShare.WithLabelValues(namespaceName).Set(share) } -// UpdateNamespaceWeight records weight for one namespace +// UpdateNamespaceWeight records weight for one namespace. func UpdateNamespaceWeight(namespaceName string, weight int64) { namespaceWeight.WithLabelValues(namespaceName).Set(float64(weight)) } -// UpdateNamespaceWeightedShare records weighted share for one namespace +// UpdateNamespaceWeightedShare records weighted share for one namespace. func UpdateNamespaceWeightedShare(namespaceName string, weightedShare float64) { namespaceWeightedShare.WithLabelValues(namespaceName).Set(weightedShare) } diff --git a/pkg/scheduler/metrics/queue.go b/pkg/scheduler/metrics/queue.go index 3af5607134f..e6697264798 100644 --- a/pkg/scheduler/metrics/queue.go +++ b/pkg/scheduler/metrics/queue.go @@ -127,35 +127,35 @@ var ( ) ) -// UpdateQueueAllocated records allocated resources for one queue +// UpdateQueueAllocated records allocated resources for one queue. func UpdateQueueAllocated(queueName string, milliCPU, memory float64) { queueAllocatedMilliCPU.WithLabelValues(queueName).Set(milliCPU) queueAllocatedMemory.WithLabelValues(queueName).Set(memory) } -// UpdateQueueRequest records request resources for one queue +// UpdateQueueRequest records request resources for one queue. func UpdateQueueRequest(queueName string, milliCPU, memory float64) { queueRequestMilliCPU.WithLabelValues(queueName).Set(milliCPU) queueRequestMemory.WithLabelValues(queueName).Set(memory) } -// UpdateQueueDeserved records deserved resources for one queue +// UpdateQueueDeserved records deserved resources for one queue. 
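Most of the metrics helpers above are thin wrappers around Prometheus collectors; a small sketch of the timing pattern used with Duration (the action name is illustrative).

package example

import (
	"time"

	"volcano.sh/volcano/pkg/scheduler/metrics"
)

func timedAction(run func()) {
	start := time.Now()
	run()

	// Record how long this action took.
	metrics.UpdateActionDuration("allocate", metrics.Duration(start))
}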
func UpdateQueueDeserved(queueName string, milliCPU, memory float64) { queueDeservedMilliCPU.WithLabelValues(queueName).Set(milliCPU) queueDeservedMemory.WithLabelValues(queueName).Set(memory) } -// UpdateQueueShare records share for one queue +// UpdateQueueShare records share for one queue. func UpdateQueueShare(queueName string, share float64) { queueShare.WithLabelValues(queueName).Set(share) } -// UpdateQueueWeight records weight for one queue +// UpdateQueueWeight records weight for one queue. func UpdateQueueWeight(queueName string, weight int32) { queueWeight.WithLabelValues(queueName).Set(float64(weight)) } -// UpdateQueueOverused records if one queue is overused +// UpdateQueueOverused records if one queue is overused. func UpdateQueueOverused(queueName string, overused bool) { var value float64 if overused { @@ -166,22 +166,22 @@ func UpdateQueueOverused(queueName string, overused bool) { queueOverused.WithLabelValues(queueName).Set(value) } -// UpdateQueuePodGroupInqueueCount records the number of Inqueue PodGroup in this queue +// UpdateQueuePodGroupInqueueCount records the number of Inqueue PodGroup in this queue. func UpdateQueuePodGroupInqueueCount(queueName string, count int32) { queuePodGroupInqueue.WithLabelValues(queueName).Set(float64(count)) } -// UpdateQueuePodGroupPendingCount records the number of Pending PodGroup in this queue +// UpdateQueuePodGroupPendingCount records the number of Pending PodGroup in this queue. func UpdateQueuePodGroupPendingCount(queueName string, count int32) { queuePodGroupPending.WithLabelValues(queueName).Set(float64(count)) } -// UpdateQueuePodGroupRunningCount records the number of Running PodGroup in this queue +// UpdateQueuePodGroupRunningCount records the number of Running PodGroup in this queue. func UpdateQueuePodGroupRunningCount(queueName string, count int32) { queuePodGroupRunning.WithLabelValues(queueName).Set(float64(count)) } -// UpdateQueuePodGroupUnknownCount records the number of Unknown PodGroup in this queue +// UpdateQueuePodGroupUnknownCount records the number of Unknown PodGroup in this queue. func UpdateQueuePodGroupUnknownCount(queueName string, count int32) { queuePodGroupUnknown.WithLabelValues(queueName).Set(float64(count)) } diff --git a/pkg/scheduler/plugins/binpack/binpack.go b/pkg/scheduler/plugins/binpack/binpack.go index f54709c32ef..eca28085c8c 100644 --- a/pkg/scheduler/plugins/binpack/binpack.go +++ b/pkg/scheduler/plugins/binpack/binpack.go @@ -85,7 +85,7 @@ type binpackPlugin struct { weight priorityWeight } -//New function returns prioritizePlugin object +// New function returns prioritizePlugin object. func New(aruguments framework.Arguments) framework.Plugin { weight := calculateWeight(aruguments) return &binpackPlugin{weight: weight} @@ -193,10 +193,12 @@ func (bp *binpackPlugin) OnSessionOpen(ssn *framework.Session) { func (bp *binpackPlugin) OnSessionClose(ssn *framework.Session) { } -// BinPackingScore use the best fit polices during scheduling. -// Goals: -// - Schedule Jobs using BestFit Policy using Resource Bin Packing Priority Function -// - Reduce Fragmentation of scarce resources on the Cluster +/* +BinPackingScore use the best fit polices during scheduling. +Goals: +- Schedule Jobs using BestFit Policy using Resource Bin Packing Priority Function +- Reduce Fragmentation of scarce resources on the Cluster. 
+*/ func BinPackingScore(task *api.TaskInfo, node *api.NodeInfo, weight priorityWeight) float64 { score := 0.0 weightSum := 0 @@ -244,7 +246,7 @@ func BinPackingScore(task *api.TaskInfo, node *api.NodeInfo, weight priorityWeig return score } -// ResourceBinPackingScore calculate the binpack score for resource with provided info +// ResourceBinPackingScore calculate the binpack score for resource with provided info. func ResourceBinPackingScore(requested, capacity, used float64, weight int) float64 { if capacity == 0 || weight == 0 { return 0 diff --git a/pkg/scheduler/plugins/conformance/conformance.go b/pkg/scheduler/plugins/conformance/conformance.go index ff76582f23c..c5f34a751ea 100644 --- a/pkg/scheduler/plugins/conformance/conformance.go +++ b/pkg/scheduler/plugins/conformance/conformance.go @@ -32,7 +32,7 @@ type conformancePlugin struct { pluginArguments framework.Arguments } -// New return conformance plugin +// New return conformance plugin. func New(arguments framework.Arguments) framework.Plugin { return &conformancePlugin{pluginArguments: arguments} } diff --git a/pkg/scheduler/plugins/defaults.go b/pkg/scheduler/plugins/defaults.go index 4aedbdf85d0..882f3f3ccb0 100644 --- a/pkg/scheduler/plugins/defaults.go +++ b/pkg/scheduler/plugins/defaults.go @@ -18,7 +18,7 @@ package plugins import "volcano.sh/volcano/pkg/scheduler/conf" -// ApplyPluginConfDefaults sets option's filed to its default value if not set +// ApplyPluginConfDefaults sets option's filed to its default value if not set. func ApplyPluginConfDefaults(option *conf.PluginOption) { t := true diff --git a/pkg/scheduler/plugins/drf/drf.go b/pkg/scheduler/plugins/drf/drf.go index 412ea23fefc..3edf4cfa6f1 100644 --- a/pkg/scheduler/plugins/drf/drf.go +++ b/pkg/scheduler/plugins/drf/drf.go @@ -51,7 +51,7 @@ type drfPlugin struct { pluginArguments framework.Arguments } -// New return drf plugin +// New return drf plugin. func New(arguments framework.Arguments) framework.Plugin { return &drfPlugin{ totalResource: api.EmptyResource(), @@ -65,7 +65,7 @@ func (drf *drfPlugin) Name() string { return PluginName } -// NamespaceOrderEnabled returns the NamespaceOrder for this plugin is enabled in this session or not +// NamespaceOrderEnabled returns the NamespaceOrder for this plugin is enabled in this session or not. func (drf *drfPlugin) NamespaceOrderEnabled(ssn *framework.Session) bool { for _, tier := range ssn.Tiers { for _, plugin := range tier.Plugins { diff --git a/pkg/scheduler/plugins/gang/gang.go b/pkg/scheduler/plugins/gang/gang.go index ff2e9befb06..e47598e9792 100644 --- a/pkg/scheduler/plugins/gang/gang.go +++ b/pkg/scheduler/plugins/gang/gang.go @@ -38,7 +38,7 @@ type gangPlugin struct { pluginArguments framework.Arguments } -// New return gang plugin +// New return gang plugin. func New(arguments framework.Arguments) framework.Plugin { return &gangPlugin{pluginArguments: arguments} } diff --git a/pkg/scheduler/plugins/nodeorder/nodeorder.go b/pkg/scheduler/plugins/nodeorder/nodeorder.go index 54725875ca5..70056b1b401 100644 --- a/pkg/scheduler/plugins/nodeorder/nodeorder.go +++ b/pkg/scheduler/plugins/nodeorder/nodeorder.go @@ -49,7 +49,7 @@ type nodeOrderPlugin struct { pluginArguments framework.Arguments } -//New function returns prioritizePlugin object +//New function returns prioritizePlugin object. 
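ResourceBinPackingScore above is defined to return 0 when capacity or weight is 0; for the normal case the usual bin-packing ratio is (used + requested) / capacity scaled by the resource weight, although the exact formula is not reproduced in this diff, so treat the numbers below as illustrative.

package main

import (
	"fmt"

	"volcano.sh/volcano/pkg/scheduler/plugins/binpack"
)

func main() {
	// Node with 4000m CPU capacity, 2000m already used, a task asking for
	// 1000m, and a CPU weight of 2. With the ratio described above this
	// would score 2 * 3000/4000 = 1.5; higher means a fuller node.
	fmt.Println(binpack.ResourceBinPackingScore(1000, 4000, 2000, 2))

	// Degenerate cases score 0 by definition.
	fmt.Println(binpack.ResourceBinPackingScore(1000, 0, 0, 2))
}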
func New(aruguments framework.Arguments) framework.Plugin { return &nodeOrderPlugin{pluginArguments: aruguments} } diff --git a/pkg/scheduler/plugins/predicates/predicates.go b/pkg/scheduler/plugins/predicates/predicates.go index bde08fec717..95d139f1be1 100644 --- a/pkg/scheduler/plugins/predicates/predicates.go +++ b/pkg/scheduler/plugins/predicates/predicates.go @@ -44,7 +44,7 @@ type predicatesPlugin struct { pluginArguments framework.Arguments } -// New return predicate plugin +// New return predicate plugin. func New(arguments framework.Arguments) framework.Plugin { return &predicatesPlugin{pluginArguments: arguments} } diff --git a/pkg/scheduler/plugins/priority/priority.go b/pkg/scheduler/plugins/priority/priority.go index 9f02ee7ffbc..cfc01c6e5b5 100644 --- a/pkg/scheduler/plugins/priority/priority.go +++ b/pkg/scheduler/plugins/priority/priority.go @@ -31,7 +31,7 @@ type priorityPlugin struct { pluginArguments framework.Arguments } -// New return priority plugin +// New return priority plugin. func New(arguments framework.Arguments) framework.Plugin { return &priorityPlugin{pluginArguments: arguments} } diff --git a/pkg/scheduler/plugins/proportion/proportion.go b/pkg/scheduler/plugins/proportion/proportion.go index 4dd3c72024a..1de4ff0807d 100644 --- a/pkg/scheduler/plugins/proportion/proportion.go +++ b/pkg/scheduler/plugins/proportion/proportion.go @@ -46,7 +46,7 @@ type queueAttr struct { request *api.Resource } -// New return proportion action +// New return proportion action. func New(arguments framework.Arguments) framework.Plugin { return &proportionPlugin{ totalResource: api.EmptyResource(), diff --git a/pkg/scheduler/plugins/util/util.go b/pkg/scheduler/plugins/util/util.go index 27202601881..c9546ff5ca8 100644 --- a/pkg/scheduler/plugins/util/util.go +++ b/pkg/scheduler/plugins/util/util.go @@ -29,7 +29,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/framework" ) -// PodLister is used in predicate and nodeorder plugin +// PodLister is used in predicate and nodeorder plugin. type PodLister struct { Session *framework.Session @@ -38,12 +38,12 @@ type PodLister struct { TaskWithAffinity map[api.TaskID]*api.TaskInfo } -// PodAffinityLister is used to list pod with affinity +// PodAffinityLister is used to list pod with affinity. type PodAffinityLister struct { pl *PodLister } -// HaveAffinity checks pod have affinity or not +// HaveAffinity checks pod have affinity or not. func HaveAffinity(pod *v1.Pod) bool { affinity := pod.Spec.Affinity return affinity != nil && @@ -52,7 +52,7 @@ func HaveAffinity(pod *v1.Pod) bool { affinity.PodAntiAffinity != nil) } -// NewPodLister returns a PodLister generate from ssn +// NewPodLister returns a PodLister generate from ssn. func NewPodLister(ssn *framework.Session) *PodLister { pl := &PodLister{ Session: ssn, @@ -87,7 +87,7 @@ func (pl *PodLister) copyTaskPod(task *api.TaskInfo) *v1.Pod { } // GetPod will get pod with proper nodeName, from cache or DeepCopy -// keeping this function read only to avoid concurrent panic of map +// keeping this function read only to avoid concurrent panic of map. func (pl *PodLister) GetPod(task *api.TaskInfo) *v1.Pod { if task.NodeName == task.Pod.Spec.NodeName { return task.Pod @@ -127,7 +127,7 @@ func (pl *PodLister) UpdateTask(task *api.TaskInfo, nodeName string) *v1.Pod { return pod } -// List method is used to list all the pods +// List method is used to list all the pods. 
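The PodLister above tracks tasks with affinity separately; whether a pod "has affinity" comes down to nil checks on the pod's affinity fields. A self-contained sketch of that check, assuming the usual three fields since the hunk elides part of the body:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// haveAffinity reports whether a pod declares node affinity, pod affinity,
// or pod anti-affinity.
func haveAffinity(pod *v1.Pod) bool {
	a := pod.Spec.Affinity
	return a != nil &&
		(a.NodeAffinity != nil || a.PodAffinity != nil || a.PodAntiAffinity != nil)
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Affinity: &v1.Affinity{PodAntiAffinity: &v1.PodAntiAffinity{}}}}
	fmt.Println(haveAffinity(pod))       // true
	fmt.Println(haveAffinity(&v1.Pod{})) // false
}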
func (pl *PodLister) List(selector labels.Selector) ([]*v1.Pod, error) { var pods []*v1.Pod for _, task := range pl.Tasks { @@ -140,7 +140,7 @@ func (pl *PodLister) List(selector labels.Selector) ([]*v1.Pod, error) { return pods, nil } -// FilteredList is used to list all the pods under filter condition +// FilteredList is used to list all the pods under filter condition. func (pl *PodLister) filteredListWithTaskSet(taskSet map[api.TaskID]*api.TaskInfo, podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { var pods []*v1.Pod for _, task := range taskSet { @@ -153,17 +153,17 @@ func (pl *PodLister) filteredListWithTaskSet(taskSet map[api.TaskID]*api.TaskInf return pods, nil } -// FilteredList is used to list all the pods under filter condition +// FilteredList is used to list all the pods under filter condition. func (pl *PodLister) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { return pl.filteredListWithTaskSet(pl.Tasks, podFilter, selector) } -// AffinityFilteredList is used to list all the pods with affinity under filter condition +// AffinityFilteredList is used to list all the pods with affinity under filter condition. func (pl *PodLister) AffinityFilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { return pl.filteredListWithTaskSet(pl.TaskWithAffinity, podFilter, selector) } -// AffinityLister generate a PodAffinityLister following current PodLister +// AffinityLister generate a PodAffinityLister following current PodLister. func (pl *PodLister) AffinityLister() *PodAffinityLister { pal := &PodAffinityLister{ pl: pl, @@ -171,17 +171,17 @@ func (pl *PodLister) AffinityLister() *PodAffinityLister { return pal } -// List method is used to list all the pods +// List method is used to list all the pods. func (pal *PodAffinityLister) List(selector labels.Selector) ([]*v1.Pod, error) { return pal.pl.List(selector) } -// FilteredList is used to list all the pods with affinity under filter condition +// FilteredList is used to list all the pods with affinity under filter condition. func (pal *PodAffinityLister) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { return pal.pl.AffinityFilteredList(podFilter, selector) } -// GenerateNodeMapAndSlice returns the nodeMap and nodeSlice generated from ssn +// GenerateNodeMapAndSlice returns the nodeMap and nodeSlice generated from ssn. func GenerateNodeMapAndSlice(nodes map[string]*api.NodeInfo) (map[string]*schedulernodeinfo.NodeInfo, []*v1.Node) { var nodeMap map[string]*schedulernodeinfo.NodeInfo var nodeSlice []*v1.Node @@ -195,12 +195,12 @@ func GenerateNodeMapAndSlice(nodes map[string]*api.NodeInfo) (map[string]*schedu return nodeMap, nodeSlice } -// CachedNodeInfo is used in nodeorder and predicate plugin +// CachedNodeInfo is used in nodeorder and predicate plugin. type CachedNodeInfo struct { Session *framework.Session } -// GetNodeInfo is used to get info of a particular node +// GetNodeInfo is used to get info of a particular node. func (c *CachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) { node, found := c.Session.Nodes[name] if !found { @@ -211,12 +211,12 @@ func (c *CachedNodeInfo) GetNodeInfo(name string) (*v1.Node, error) { return node.Node, nil } -// NodeLister is used in nodeorder plugin +// NodeLister is used in nodeorder plugin. type NodeLister struct { Session *framework.Session } -// List is used to list all the nodes +// List is used to list all the nodes. 
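The listers above adapt the session's task cache to the upstream predicate and priority interfaces; the core of List and FilteredList is matching pod labels against a selector. A reduced sketch of that matching step, with a plain slice standing in for the session's task maps:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// listBySelector keeps the pods whose labels match the selector, the same
// filtering step the listers perform while walking the session's tasks.
func listBySelector(pods []*v1.Pod, selector labels.Selector) []*v1.Pod {
	var out []*v1.Pod
	for _, p := range pods {
		if selector.Matches(labels.Set(p.Labels)) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "web-0", Labels: map[string]string{"app": "web"}}},
		{ObjectMeta: metav1.ObjectMeta{Name: "db-0", Labels: map[string]string{"app": "db"}}},
	}
	sel, err := labels.Parse("app=web")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(listBySelector(pods, sel))) // 1
}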
func (nl *NodeLister) List() ([]*v1.Node, error) { var nodes []*v1.Node for _, node := range nl.Session.Nodes { diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 56c49b38690..6bc4bcb9a7d 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -41,7 +41,7 @@ type Scheduler struct { schedulePeriod time.Duration } -// NewScheduler returns a scheduler +// NewScheduler returns a scheduler. func NewScheduler( config *rest.Config, schedulerName string, @@ -59,7 +59,7 @@ func NewScheduler( return scheduler, nil } -// Run runs the Scheduler +// Run runs the Scheduler. func (pc *Scheduler) Run(stopCh <-chan struct{}) { // Start cache for policy. go pc.cache.Run(stopCh) diff --git a/pkg/scheduler/util/assert/assert.go b/pkg/scheduler/util/assert/assert.go index f16d8078cbf..c89cc8cf793 100644 --- a/pkg/scheduler/util/assert/assert.go +++ b/pkg/scheduler/util/assert/assert.go @@ -24,7 +24,7 @@ func init() { } } -// Assert check condition, if condition is false, print message by log or panic +// Assert check condition, if condition is false, print message by log or panic. func Assert(condition bool, message string) { if condition { return @@ -35,7 +35,7 @@ func Assert(condition bool, message string) { klog.Errorf("%s, %s", message, debug.Stack()) } -// Assertf check condition, if condition is false, print message using Assert +// Assertf check condition, if condition is false, print message using Assert. func Assertf(condition bool, format string, args ...interface{}) { if condition { return diff --git a/pkg/scheduler/util/priority_queue.go b/pkg/scheduler/util/priority_queue.go index eb10111a701..4715572aea5 100644 --- a/pkg/scheduler/util/priority_queue.go +++ b/pkg/scheduler/util/priority_queue.go @@ -32,7 +32,7 @@ type priorityQueue struct { lessFn api.LessFn } -// NewPriorityQueue returns a PriorityQueue +// NewPriorityQueue returns a PriorityQueue. func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue { return &PriorityQueue{ queue: priorityQueue{ @@ -42,12 +42,12 @@ func NewPriorityQueue(lessFn api.LessFn) *PriorityQueue { } } -// Push pushes element in the priority Queue +// Push pushes element in the priority Queue. func (q *PriorityQueue) Push(it interface{}) { heap.Push(&q.queue, it) } -// Pop pops element in the priority Queue +// Pop pops element in the priority Queue. func (q *PriorityQueue) Pop() interface{} { if q.Len() == 0 { return nil @@ -56,12 +56,12 @@ func (q *PriorityQueue) Pop() interface{} { return heap.Pop(&q.queue) } -// Empty check if queue is empty +// Empty check if queue is empty. func (q *PriorityQueue) Empty() bool { return q.queue.Len() == 0 } -// Len returns Len of the priority queue +// Len returns Len of the priority queue. func (q *PriorityQueue) Len() int { return q.queue.Len() } diff --git a/pkg/scheduler/util/scheduler_helper.go b/pkg/scheduler/util/scheduler_helper.go index ab1d277f9cf..84bf4280ae1 100644 --- a/pkg/scheduler/util/scheduler_helper.go +++ b/pkg/scheduler/util/scheduler_helper.go @@ -61,7 +61,7 @@ func CalculateNumOfFeasibleNodesToFind(numAllNodes int32) (numNodes int32) { return numNodes } -// PredicateNodes returns the specified number of nodes that fit a task +// PredicateNodes returns the specified number of nodes that fit a task. 
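The PriorityQueue above is a thin wrapper over container/heap with a pluggable less function. A self-contained sketch of the same shape, with ints standing in for task or job items:

package main

import (
	"container/heap"
	"fmt"
)

// lessFn plays the role of api.LessFn: report whether a should come out
// of the queue before b.
type lessFn func(a, b interface{}) bool

// items is a minimal heap.Interface backed by a slice and a lessFn, the
// same layout as the scheduler's priorityQueue.
type items struct {
	data []interface{}
	less lessFn
}

func (it *items) Len() int           { return len(it.data) }
func (it *items) Less(i, j int) bool { return it.less(it.data[i], it.data[j]) }
func (it *items) Swap(i, j int)      { it.data[i], it.data[j] = it.data[j], it.data[i] }
func (it *items) Push(x interface{}) { it.data = append(it.data, x) }
func (it *items) Pop() interface{} {
	last := it.data[len(it.data)-1]
	it.data = it.data[:len(it.data)-1]
	return last
}

func main() {
	q := &items{less: func(a, b interface{}) bool { return a.(int) < b.(int) }}
	heap.Push(q, 3)
	heap.Push(q, 1)
	heap.Push(q, 2)
	fmt.Println(heap.Pop(q), heap.Pop(q), heap.Pop(q)) // 1 2 3
}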
func PredicateNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.PredicateFn) ([]*api.NodeInfo, *api.FitErrors) { //var workerLock sync.Mutex @@ -117,7 +117,7 @@ func PredicateNodes(task *api.TaskInfo, nodes []*api.NodeInfo, fn api.PredicateF return predicateNodes, fe } -// PrioritizeNodes returns a map whose key is node's score and value are corresponding nodes +// PrioritizeNodes returns a map whose key is node's score and value are corresponding nodes. func PrioritizeNodes(task *api.TaskInfo, nodes []*api.NodeInfo, batchFn api.BatchNodeOrderFn, mapFn api.NodeOrderMapFn, reduceFn api.NodeOrderReduceFn) map[float64][]*api.NodeInfo { pluginNodeScoreMap := map[string]schedulerapi.HostPriorityList{} nodeOrderScoreMap := map[string]float64{} @@ -182,7 +182,7 @@ func PrioritizeNodes(task *api.TaskInfo, nodes []*api.NodeInfo, batchFn api.Batc return nodeScores } -// SortNodes returns nodes by order of score +// SortNodes returns nodes by order of score. func SortNodes(nodeScores map[float64][]*api.NodeInfo) []*api.NodeInfo { var nodesInorder []*api.NodeInfo var keys []float64 @@ -211,7 +211,7 @@ func SelectBestNode(nodeScores map[float64][]*api.NodeInfo) *api.NodeInfo { return bestNodes[rand.Intn(len(bestNodes))] } -// GetNodeList returns values of the map 'nodes' +// GetNodeList returns values of the map 'nodes'. func GetNodeList(nodes map[string]*api.NodeInfo) []*api.NodeInfo { result := make([]*api.NodeInfo, 0, len(nodes)) for _, v := range nodes { @@ -220,7 +220,7 @@ func GetNodeList(nodes map[string]*api.NodeInfo) []*api.NodeInfo { return result } -// ValidateVictims returns an error if the resources of the victims can't satisfy the preemptor +// ValidateVictims returns an error if the resources of the victims can't satisfy the preemptor. func ValidateVictims(preemptor *api.TaskInfo, node *api.NodeInfo, victims []*api.TaskInfo) error { if len(victims) == 0 { return fmt.Errorf("no victims") diff --git a/pkg/scheduler/util/test_utils.go b/pkg/scheduler/util/test_utils.go index b393010f647..76a89f27311 100644 --- a/pkg/scheduler/util/test_utils.go +++ b/pkg/scheduler/util/test_utils.go @@ -30,7 +30,7 @@ import ( "volcano.sh/volcano/pkg/scheduler/api" ) -// BuildResourceList builts resource list object +// BuildResourceList builts resource list object. func BuildResourceList(cpu string, memory string) v1.ResourceList { return v1.ResourceList{ v1.ResourceCPU: resource.MustParse(cpu), @@ -39,7 +39,7 @@ func BuildResourceList(cpu string, memory string) v1.ResourceList { } } -// BuildResourceListWithGPU builts resource list with GPU +// BuildResourceListWithGPU builts resource list with GPU. 
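PrioritizeNodes above buckets nodes by their summed score, so picking a winner means finding the highest bucket and breaking ties at random. A standalone sketch with node names in place of *api.NodeInfo; scores are assumed non-negative:

package main

import (
	"fmt"
	"math/rand"
)

// selectBestNode picks a node from the highest-scoring bucket, breaking
// ties at random.
func selectBestNode(nodeScores map[float64][]string) string {
	maxScore := -1.0
	var best []string
	for score, nodes := range nodeScores {
		if score > maxScore {
			maxScore = score
			best = nodes
		}
	}
	if len(best) == 0 {
		return ""
	}
	return best[rand.Intn(len(best))]
}

func main() {
	scores := map[float64][]string{1.5: {"node-a"}, 3.0: {"node-b", "node-c"}}
	fmt.Println(selectBestNode(scores)) // node-b or node-c
}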
func BuildResourceListWithGPU(cpu string, memory string, GPU string) v1.ResourceList { return v1.ResourceList{ v1.ResourceCPU: resource.MustParse(cpu), @@ -48,7 +48,7 @@ func BuildResourceListWithGPU(cpu string, memory string, GPU string) v1.Resource } } -// BuildNode builts node object + func BuildNode(name string, alloc v1.ResourceList, labels map[string]string) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -62,7 +62,7 @@ func BuildNode(name string, alloc v1.ResourceList, labels map[string]string) *v1 } } -// BuildPod builts Pod object + func BuildPod(namespace, name, nodename string, p v1.PodPhase, req v1.ResourceList, groupName string, labels map[string]string, selector map[string]string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -91,14 +91,14 @@ func BuildPod(namespace, name, nodename string, p v1.PodPhase, req v1.ResourceLi } } -// FakeBinder is used as fake binder + type FakeBinder struct { sync.Mutex Binds map[string]string Channel chan string } -// Bind used by fake binder struct to bind pods + func (fb *FakeBinder) Bind(p *v1.Pod, hostname string) error { fb.Lock() defer fb.Unlock() @@ -111,7 +111,7 @@ func (fb *FakeBinder) Bind(p *v1.Pod, hostname string) error { return nil } -// FakeEvictor is used as fake evictor + type FakeEvictor struct { sync.Mutex evicts []string @@ -125,7 +125,7 @@ func (fe *FakeEvictor) Evicts() []string { return append([]string{}, fe.evicts...) } -// Evict is used by fake evictor to evict pods + func (fe *FakeEvictor) Evict(p *v1.Pod) error { fe.Lock() defer fe.Unlock() @@ -139,32 +139,32 @@ func (fe *FakeEvictor) Evict(p *v1.Pod) error { return nil } -// FakeStatusUpdater is used for fake status update + type FakeStatusUpdater struct { } -// UpdatePodCondition is a empty function + func (ftsu *FakeStatusUpdater) UpdatePodCondition(pod *v1.Pod, podCondition *v1.PodCondition) (*v1.Pod, error) { // do nothing here return nil, nil } -// UpdatePodGroup is a empty function + func (ftsu *FakeStatusUpdater) UpdatePodGroup(pg *api.PodGroup) (*api.PodGroup, error) { // do nothing here return nil, nil } -// FakeVolumeBinder is used as fake volume binder + type FakeVolumeBinder struct { } -// AllocateVolumes is a empty function + func (fvb *FakeVolumeBinder) AllocateVolumes(task *api.TaskInfo, hostname string) error { return nil } -// BindVolumes is a empty function + func (fvb *FakeVolumeBinder) BindVolumes(task *api.TaskInfo) error { return nil } diff --git a/pkg/version/version.go b/pkg/version/version.go index fbdd2b39b9f..0a99fc4c5f2 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -32,7 +32,7 @@ var ( apiVersion = "v1alpha1" ) -// PrintVersionAndExit prints versions from the array returned by Info() and exit +// PrintVersionAndExit prints versions from the array returned by Info() and exit. func PrintVersionAndExit() { for _, i := range Info(apiVersion) { fmt.Printf("%v\n", i) @@ -40,7 +40,7 @@ func PrintVersionAndExit() { os.Exit(0) } -// Info returns an array of various service versions +// Info returns an array of various service versions. 
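The test fixtures above build ResourceLists from quantity strings; resource.MustParse does the parsing and panics on malformed input, which is acceptable in test helpers. A small usage sketch:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// buildResourceList mirrors the fixture helper: quantity strings in,
// typed ResourceList out.
func buildResourceList(cpu, memory string) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse(cpu),
		v1.ResourceMemory: resource.MustParse(memory),
	}
}

func main() {
	rl := buildResourceList("500m", "1Gi")
	cpu := rl[v1.ResourceCPU]
	mem := rl[v1.ResourceMemory]
	fmt.Println(cpu.MilliValue(), mem.Value()) // 500 1073741824
}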
func Info(apiVersion string) []string { return []string{ fmt.Sprintf("API Version: %s", apiVersion), diff --git a/pkg/webhooks/admission/jobs/mutate/mutate_job.go b/pkg/webhooks/admission/jobs/mutate/mutate_job.go index dd3b4ea8a16..c346aead378 100644 --- a/pkg/webhooks/admission/jobs/mutate/mutate_job.go +++ b/pkg/webhooks/admission/jobs/mutate/mutate_job.go @@ -70,7 +70,7 @@ type patchOperation struct { Value interface{} `json:"value,omitempty"` } -// MutateJobs mutate jobs +// MutateJobs mutate jobs. func Jobs(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { klog.V(3).Infof("mutating jobs") diff --git a/pkg/webhooks/admission/jobs/validate/admit_job.go b/pkg/webhooks/admission/jobs/validate/admit_job.go index 60f5968ec24..07a8abb046b 100644 --- a/pkg/webhooks/admission/jobs/validate/admit_job.go +++ b/pkg/webhooks/admission/jobs/validate/admit_job.go @@ -69,7 +69,7 @@ var service = &router.AdmissionService{ var config = &router.AdmissionServiceConfig{} -// AdmitJobs is to admit jobs and return response +// AdmitJobs is to admit jobs and return response. func AdmitJobs(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { klog.V(3).Infof("admitting jobs -- %s", ar.Request.Operation) diff --git a/pkg/webhooks/admission/jobs/validate/util.go b/pkg/webhooks/admission/jobs/validate/util.go index 7363c55c687..071810efb3e 100644 --- a/pkg/webhooks/admission/jobs/validate/util.go +++ b/pkg/webhooks/admission/jobs/validate/util.go @@ -28,7 +28,7 @@ import ( busv1alpha1 "volcano.sh/volcano/pkg/apis/bus/v1alpha1" ) -// policyEventMap defines all policy events and whether to allow external use +// policyEventMap defines all policy events and whether to allow external use. var policyEventMap = map[busv1alpha1.Event]bool{ busv1alpha1.AnyEvent: true, busv1alpha1.PodFailedEvent: true, @@ -39,7 +39,7 @@ var policyEventMap = map[busv1alpha1.Event]bool{ busv1alpha1.CommandIssuedEvent: false, } -// policyActionMap defines all policy actions and whether to allow external use +// policyActionMap defines all policy actions and whether to allow external use. var policyActionMap = map[busv1alpha1.Action]bool{ busv1alpha1.AbortJobAction: true, busv1alpha1.RestartJobAction: true, @@ -158,7 +158,7 @@ func getValidActions() []busv1alpha1.Action { return actions } -// validateIO validates IO configuration +// validateIO validates IO configuration. func validateIO(volumes []batchv1alpha1.VolumeSpec) error { volumeMap := map[string]bool{} for _, volume := range volumes { diff --git a/pkg/webhooks/admission/pods/admit_pod.go b/pkg/webhooks/admission/pods/admit_pod.go index 0572dea4068..dacdd9159c0 100644 --- a/pkg/webhooks/admission/pods/admit_pod.go +++ b/pkg/webhooks/admission/pods/admit_pod.go @@ -63,7 +63,7 @@ var service = &router.AdmissionService{ var config = &router.AdmissionServiceConfig{} -// AdmitPods is to admit pods and return response +// AdmitPods is to admit pods and return response. func AdmitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { klog.V(3).Infof("admitting pods -- %s", ar.Request.Operation) @@ -92,10 +92,12 @@ func AdmitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { return &reviewResponse } -// allow pods to create when -// 1. schedulerName of pod isn't volcano -// 2. pod has Podgroup whose phase isn't Pending -// 3. normal pods whose schedulerName is volcano don't have podgroup +/* +allow pods to create when +1. schedulerName of pod isn't volcano +2. pod has Podgroup whose phase isn't Pending +3. 
normal pods whose schedulerName is volcano don't have podgroup. +*/ func validatePod(pod *v1.Pod, reviewResponse *v1beta1.AdmissionResponse) string { if pod.Spec.SchedulerName != config.SchedulerName { return "" diff --git a/pkg/webhooks/admission/queues/mutate/mutate_queue.go b/pkg/webhooks/admission/queues/mutate/mutate_queue.go index ea49a3cc4ca..11bc1f5e7c5 100644 --- a/pkg/webhooks/admission/queues/mutate/mutate_queue.go +++ b/pkg/webhooks/admission/queues/mutate/mutate_queue.go @@ -62,7 +62,7 @@ type patchOperation struct { Value interface{} `json:"value,omitempty"` } -// Queues mutate queues +// Queues mutate queues. func Queues(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { klog.V(3).Infof("Mutating %s queue %s.", ar.Request.Operation, ar.Request.Name) diff --git a/pkg/webhooks/admission/queues/validate/validate_queue.go b/pkg/webhooks/admission/queues/validate/validate_queue.go index 56d1aca1303..133d47ea4aa 100644 --- a/pkg/webhooks/admission/queues/validate/validate_queue.go +++ b/pkg/webhooks/admission/queues/validate/validate_queue.go @@ -60,7 +60,7 @@ var service = &router.AdmissionService{ var config = &router.AdmissionServiceConfig{} -// AdmitQueues is to admit queues and return response +// AdmitQueues is to admit queues and return response. func AdmitQueues(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { klog.V(3).Infof("Admitting %s queue %s.", ar.Request.Operation, ar.Request.Name) diff --git a/pkg/webhooks/router/interface.go b/pkg/webhooks/router/interface.go index e3f3de8d4e2..7e3cd4a9c65 100644 --- a/pkg/webhooks/router/interface.go +++ b/pkg/webhooks/router/interface.go @@ -24,7 +24,7 @@ import ( "volcano.sh/volcano/pkg/client/clientset/versioned" ) -//The AdmitFunc returns response +//The AdmitFunc returns response. type AdmitFunc func(v1beta1.AdmissionReview) *v1beta1.AdmissionResponse type AdmissionServiceConfig struct { diff --git a/pkg/webhooks/router/server.go b/pkg/webhooks/router/server.go index 7a28e1844bd..8988d74daaa 100644 --- a/pkg/webhooks/router/server.go +++ b/pkg/webhooks/router/server.go @@ -30,13 +30,13 @@ import ( "volcano.sh/volcano/pkg/webhooks/util" ) -// CONTENTTYPE http content-type +// CONTENTTYPE http content-type. var CONTENTTYPE = "Content-Type" -// APPLICATIONJSON json content +// APPLICATIONJSON json content. var APPLICATIONJSON = "application/json" -// Serve the http request +// Serve the http request. func Serve(w io.Writer, r *http.Request, admit AdmitFunc) { var body []byte if r.Body != nil { diff --git a/pkg/webhooks/schema/schema.go b/pkg/webhooks/schema/schema.go index 2da938a760d..61d9a739937 100644 --- a/pkg/webhooks/schema/schema.go +++ b/pkg/webhooks/schema/schema.go @@ -46,7 +46,7 @@ func addToScheme(scheme *runtime.Scheme) { v1beta1.AddToScheme(scheme) } -//DecodeJob decodes the job using deserializer from the raw object +//DecodeJob decodes the job using deserializer from the raw object. func DecodeJob(object runtime.RawExtension, resource metav1.GroupVersionResource) (*batchv1alpha1.Job, error) { jobResource := metav1.GroupVersionResource{Group: batchv1alpha1.SchemeGroupVersion.Group, Version: batchv1alpha1.SchemeGroupVersion.Version, Resource: "jobs"} raw := object.Raw @@ -85,7 +85,7 @@ func DecodePod(object runtime.RawExtension, resource metav1.GroupVersionResource return &pod, nil } -// DecodeQueue decodes the queue using deserializer from the raw object +// DecodeQueue decodes the queue using deserializer from the raw object. 
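The schema helpers above (DecodeJob, DecodePod, DecodeQueue) follow one pattern: verify the incoming GroupVersionResource, then run the AdmissionReview's raw bytes through a deserializer that knows the target type. A generic sketch of that decode step, building the deserializer directly from a fresh scheme here rather than the one set up in the schema package:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// decodePod runs raw admission bytes through a deserializer built from a
// scheme that knows v1.Pod.
func decodePod(raw []byte) (*v1.Pod, error) {
	scheme := runtime.NewScheme()
	if err := v1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	deserializer := serializer.NewCodecFactory(scheme).UniversalDeserializer()
	pod := &v1.Pod{}
	if _, _, err := deserializer.Decode(raw, nil, pod); err != nil {
		return nil, err
	}
	return pod, nil
}

func main() {
	raw := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"web-0"}}`)
	pod, err := decodePod(raw)
	fmt.Println(pod.Name, err) // web-0 <nil>
}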
func DecodeQueue(object runtime.RawExtension, resource metav1.GroupVersionResource) (*schedulingv1beta1.Queue, error) { queueResource := metav1.GroupVersionResource{ Group: schedulingv1beta1.SchemeGroupVersion.Group, diff --git a/pkg/webhooks/util/util.go b/pkg/webhooks/util/util.go index e3b4691b845..cbed20edb25 100644 --- a/pkg/webhooks/util/util.go +++ b/pkg/webhooks/util/util.go @@ -22,7 +22,7 @@ import ( "k8s.io/klog" ) -//ToAdmissionResponse updates the admission response with the input error +//ToAdmissionResponse updates the admission response with the input error. func ToAdmissionResponse(err error) *v1beta1.AdmissionResponse { klog.Error(err) return &v1beta1.AdmissionResponse{ diff --git a/test/e2e/cli_util.go b/test/e2e/cli_util.go index b4d8bd3b6e6..2285e7bc98b 100644 --- a/test/e2e/cli_util.go +++ b/test/e2e/cli_util.go @@ -24,7 +24,7 @@ import ( . "github.com/onsi/gomega" ) -//ResumeJob resumes the job in the given namespace +//ResumeJob resumes the job in the given namespace. func ResumeJob(name string, namespace string) string { command := []string{"job", "resume"} Expect(name).NotTo(Equal(""), "Job name should not be empty in Resume job command") @@ -35,7 +35,7 @@ func ResumeJob(name string, namespace string) string { return RunCliCommand(command) } -//SuspendJob suspends the job in the given namepsace +// SuspendJob suspends the job in the given namepsace. func SuspendJob(name string, namespace string) string { command := []string{"job", "suspend"} Expect(name).NotTo(Equal(""), "Job name should not be empty in Suspend job command") @@ -46,7 +46,7 @@ func SuspendJob(name string, namespace string) string { return RunCliCommand(command) } -//ListJobs list all the jobs in the given namespace +// ListJobs list all the jobs in the given namespace. func ListJobs(namespace string) string { command := []string{"job", "list"} if namespace != "" { @@ -55,7 +55,7 @@ func ListJobs(namespace string) string { return RunCliCommand(command) } -//DeleteJob delete the job in the given namespace +// DeleteJob delete the job in the given namespace. func DeleteJob(name string, namespace string) string { command := []string{"job", "delete"} Expect(name).NotTo(Equal(""), "Job name should not be empty in delete job command") @@ -66,7 +66,7 @@ func DeleteJob(name string, namespace string) string { return RunCliCommand(command) } -//RunCliCommand runs the volcano command +// RunCliCommand runs the volcano command. func RunCliCommand(command []string) string { if masterURL() != "" { command = append(command, "--master", masterURL()) @@ -81,7 +81,7 @@ func RunCliCommand(command []string) string { return string(output) } -// RunCliCommandWithoutKubeConfig runs the volcano command +// RunCliCommandWithoutKubeConfig runs the volcano command. func RunCliCommandWithoutKubeConfig(command []string) string { if masterURL() != "" { command = append(command, "--master", masterURL()) diff --git a/test/e2e/util.go b/test/e2e/util.go index e8b504d1875..f5c46f74365 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -94,7 +94,7 @@ func kubeconfigPath(home string) string { return filepath.Join(home, ".kube", "config") // default kubeconfig path is $HOME/.kube/config } -// VolcanoCliBinary function gets the volcano cli binary +// VolcanoCliBinary function gets the volcano cli binary. 
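The e2e CLI helpers above all funnel into RunCliCommand, which appends the --master and --kubeconfig flags and shells out to the vcctl binary. A reduced sketch of that pattern; the binary used in main is a placeholder ("echo") so the example runs anywhere, whereas the real tests resolve the path via VolcanoCliBinary:

package main

import (
	"fmt"
	"os/exec"
)

// runCliCommand shells out to a CLI binary and returns combined stdout and
// stderr so test assertions can inspect the output.
func runCliCommand(binary string, args ...string) (string, error) {
	out, err := exec.Command(binary, args...).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runCliCommand("echo", "job", "list", "--namespace", "default")
	fmt.Printf("%q %v\n", out, err)
}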
func VolcanoCliBinary() string { if bin := os.Getenv("VC_BIN"); bin != "" { return filepath.Join(bin, "vcctl") @@ -1072,7 +1072,7 @@ func preparePatchBytesforNode(nodeName string, oldNode *v1.Node, newNode *v1.Nod return patchBytes, nil } -// IsNodeReady function returns the node ready status +// IsNodeReady function returns the node ready status. func IsNodeReady(node *v1.Node) bool { for _, c := range node.Status.Conditions { if c.Type == v1.NodeReady {
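IsNodeReady above walks the node's conditions looking for NodeReady; the complete check is short, and a self-contained sketch of it using the standard NodeReady/ConditionTrue convention looks like this:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isNodeReady reports whether the node's NodeReady condition is True.
func isNodeReady(node *v1.Node) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == v1.NodeReady {
			return c.Status == v1.ConditionTrue
		}
	}
	return false
}

func main() {
	node := &v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{
		{Type: v1.NodeReady, Status: v1.ConditionTrue},
	}}}
	fmt.Println(isNodeReady(node)) // true
}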