Add "per node" strategy (fork) (#4)
* Add per node allocation algorithm

Signed-off-by: Matej Gera <[email protected]>

* More docs

Signed-off-by: Matej Gera <[email protected]>

* Add tests

Signed-off-by: Matej Gera <[email protected]>

* Update APIs and docs with new strategy

Signed-off-by: Matej Gera <[email protected]>

* Add changelog

Signed-off-by: Matej Gera <[email protected]>

* Fix lint

Signed-off-by: Matej Gera <[email protected]>

* Add more labels to match node name

Signed-off-by: Matej Gera <[email protected]>

* Add SHA tag to Docker image

Signed-off-by: Matej Gera <[email protected]>

* Add node name to Collector struct

Signed-off-by: Matej Gera <[email protected]>

---------

Signed-off-by: Matej Gera <[email protected]>
matej-g authored Dec 11, 2023
1 parent 9360d15 commit 9517bd6
Showing 15 changed files with 435 additions and 19 deletions.
16 changes: 16 additions & 0 deletions .chloggen/per-node-allocation-strategy.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Add new "per node" allocation strategy to the target allocator. This strategy allocates each target to the collector running on the node on which the target resides. It should only be used in conjunction with the daemonset mode.

# One or more tracking issues related to the change
issues: [1828]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
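
For context, here is a minimal Go sketch (not part of this change set) of how the new strategy is meant to be selected through the operator's v1alpha1 API. The AllocationStrategy constant is introduced in this commit; the Mode field, the ModeDaemonSet constant, and the TargetAllocator/Enabled fields are assumed from the operator's existing types:

package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
)

func main() {
	// The per-node strategy only makes sense when the collector runs as a daemonset,
	// so the two settings are configured together.
	otelcol := v1alpha1.OpenTelemetryCollector{
		Spec: v1alpha1.OpenTelemetryCollectorSpec{
			Mode: v1alpha1.ModeDaemonSet, // assumed: constant from the existing v1alpha1 API
			TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
				Enabled:            true,
				AllocationStrategy: v1alpha1.OpenTelemetryTargetAllocatorAllocationStrategyPerNode,
			},
		},
	}
	fmt.Println(otelcol.Spec.TargetAllocator.AllocationStrategy) // prints: per-node
}

The kubebuilder enum and CRD changes below keep the YAML form (allocationStrategy: per-node) in sync with this constant.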
1 change: 1 addition & 0 deletions .github/workflows/publish-target-allocator.yaml
@@ -51,6 +51,7 @@ jobs:
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{raw}}
type=ref,event=branch
type=sha
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
5 changes: 4 additions & 1 deletion apis/v1alpha1/allocation_strategy.go
@@ -16,7 +16,7 @@ package v1alpha1

type (
// OpenTelemetryTargetAllocatorAllocationStrategy represents which strategy to use to distribute targets to each collector
// +kubebuilder:validation:Enum=least-weighted;consistent-hashing
// +kubebuilder:validation:Enum=least-weighted;consistent-hashing;per-node
OpenTelemetryTargetAllocatorAllocationStrategy string
)

@@ -26,4 +26,7 @@ const (

// OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing targets will be consistently added to collectors, which allows a high-availability setup.
OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing OpenTelemetryTargetAllocatorAllocationStrategy = "consistent-hashing"

// OpenTelemetryTargetAllocatorAllocationStrategyPerNode targets will be assigned to the collector on the node they reside on (use only with daemon set).
OpenTelemetryTargetAllocatorAllocationStrategyPerNode OpenTelemetryTargetAllocatorAllocationStrategy = "per-node"
)
3 changes: 2 additions & 1 deletion apis/v1alpha1/opentelemetrycollector_types.go
@@ -297,7 +297,8 @@ type OpenTelemetryTargetAllocator struct {
// +optional
Resources v1.ResourceRequirements `json:"resources,omitempty"`
// AllocationStrategy determines which strategy the target allocator should use for allocation.
// The current options are least-weighted and consistent-hashing. The default option is least-weighted
// The current options are least-weighted, consistent-hashing and per-node. The default is
// least-weighted.
// +optional
AllocationStrategy OpenTelemetryTargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
// FilterStrategy determines how to filter targets before allocating them among the collectors.
@@ -4832,11 +4832,12 @@ spec:
allocationStrategy:
description: AllocationStrategy determines which strategy the
target allocator should use for allocation. The current options
are least-weighted and consistent-hashing. The default option
is least-weighted
are least-weighted, consistent-hashing and per-node. The default
is least-weighted.
enum:
- least-weighted
- consistent-hashing
- per-node
type: string
enabled:
description: Enabled indicates whether to use a target allocation
1 change: 1 addition & 0 deletions cmd/otel-allocator/allocation/allocatortest.go
@@ -52,6 +52,7 @@ func MakeNCollectors(n int, startingIndex int) map[string]*Collector {
toReturn[collector] = &Collector{
Name: collector,
NumTargets: 0,
Node: fmt.Sprintf("node-%d", i),
}
}
return toReturn
2 changes: 1 addition & 1 deletion cmd/otel-allocator/allocation/consistent_hashing.go
@@ -161,7 +161,7 @@ func (c *consistentHashingAllocator) handleCollectors(diff diff.Changes[*Collect
}
// Insert the new collectors
for _, i := range diff.Additions() {
c.collectors[i.Name] = NewCollector(i.Name)
c.collectors[i.Name] = NewCollector(i.Name, i.Node)
c.consistentHasher.Add(c.collectors[i.Name])
}

2 changes: 1 addition & 1 deletion cmd/otel-allocator/allocation/least_weighted.go
@@ -191,7 +191,7 @@ func (allocator *leastWeightedAllocator) handleCollectors(diff diff.Changes[*Col
}
// Insert the new collectors
for _, i := range diff.Additions() {
allocator.collectors[i.Name] = NewCollector(i.Name)
allocator.collectors[i.Name] = NewCollector(i.Name, i.Node)
}
if allocateTargets {
for _, item := range allocator.targetItems {
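
The call sites above now pass a node name into NewCollector, but the diff for the Collector struct itself is not among the hunks shown here (the commit message notes "Add node name to Collector struct"). A plausible sketch of the updated type and constructor, inferred from the usage in allocatortest.go and in the two allocators above, with the exact field layout assumed:

// Collector tracks a discovered collector instance and, with this change,
// the Kubernetes node it is scheduled on.
type Collector struct {
	Name       string
	Node       string // new: lets the per-node strategy match targets to the collector on the same node
	NumTargets int
}

// NewCollector gains a second argument for the node name.
func NewCollector(name, node string) *Collector {
	return &Collector{Name: name, Node: node}
}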
304 changes: 304 additions & 0 deletions cmd/otel-allocator/allocation/per_node.go
@@ -0,0 +1,304 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package allocation

import (
"sync"

"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/diff"
"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target"

"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
)

var _ Allocator = &perNodeAllocator{}

const perNodeStrategyName = "per-node"

// perNodeAllocator makes decisions to distribute work among
// a number of OpenTelemetry collectors based on the node on which
// the collector is running. This allocator should be used only when
// collectors are running as a daemon set (agent) on each node.
// Users need to call SetTargets when they have new targets in their
// clusters and call SetCollectors when the collectors have changed.
type perNodeAllocator struct {
// m protects collectors and targetItems for concurrent use.
m sync.RWMutex
// collectors is a map from a Collector's name to a Collector instance
collectors map[string]*Collector
// targetItems is a map from a target item's hash to the target items allocated state
targetItems map[string]*target.Item

// collectorKey -> job -> target item hash -> true
targetItemsPerJobPerCollector map[string]map[string]map[string]bool

log logr.Logger

filter Filter
}

// nodeLabels are labels that are used to identify the node on which the given
// target is residing. To learn more about these labels, please refer to:
// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config
var nodeLabels = []model.LabelName{
"__meta_kubernetes_pod_node_name",
"__meta_kubernetes_node_name",
"__meta_kubernetes_endpoint_node_name",
}

// SetCollectors sets the set of collectors with key=collectorName, value=Collector object.
// This method is called when Collectors are added or removed.
func (allocator *perNodeAllocator) SetCollectors(collectors map[string]*Collector) {
timer := prometheus.NewTimer(TimeToAssign.WithLabelValues("SetCollectors", perNodeStrategyName))
defer timer.ObserveDuration()

CollectorsAllocatable.WithLabelValues(perNodeStrategyName).Set(float64(len(collectors)))
if len(collectors) == 0 {
allocator.log.Info("No collector instances present")
return
}

allocator.m.Lock()
defer allocator.m.Unlock()

// Check for collector changes
collectorsDiff := diff.Maps(allocator.collectors, collectors)
if len(collectorsDiff.Additions()) != 0 || len(collectorsDiff.Removals()) != 0 {
allocator.handleCollectors(collectorsDiff)
}
}

// handleCollectors receives the new and removed collectors and reconciles the current state.
// Any removals are removed from the allocator's collectors. New collectors are added to the allocator's collector map.
func (allocator *perNodeAllocator) handleCollectors(diff diff.Changes[*Collector]) {
// Clear removed collectors
for _, k := range diff.Removals() {
delete(allocator.collectors, k.Name)
delete(allocator.targetItemsPerJobPerCollector, k.Name)
TargetsPerCollector.WithLabelValues(k.Name, perNodeStrategyName).Set(0)
}

// Insert the new collectors
for _, i := range diff.Additions() {
allocator.collectors[i.Name] = NewCollector(i.Name, i.Node)
}
}

// SetTargets accepts a list of targets that will be used to make
// load balancing decisions. This method should be called when there are
new targets discovered or existing targets are shut down.
func (allocator *perNodeAllocator) SetTargets(targets map[string]*target.Item) {
timer := prometheus.NewTimer(TimeToAssign.WithLabelValues("SetTargets", perNodeStrategyName))
defer timer.ObserveDuration()

if allocator.filter != nil {
targets = allocator.filter.Apply(targets)
}
RecordTargetsKept(targets)

allocator.m.Lock()
defer allocator.m.Unlock()

if len(allocator.collectors) == 0 {
allocator.log.Info("No collector instances present, saving targets to allocate to collector(s)")
// If there were no targets discovered previously, assign this as the new set of target items
if len(allocator.targetItems) == 0 {
allocator.log.Info("No targets discovered previously, saving the newly found targets to the targetItems set")
for k, item := range targets {
allocator.targetItems[k] = item
}
} else {
// If there were previously discovered targets, add or remove accordingly
targetsDiffEmptyCollectorSet := diff.Maps(allocator.targetItems, targets)

// Check for additions
if len(targetsDiffEmptyCollectorSet.Additions()) > 0 {
allocator.log.Info("New targets discovered, adding new targets to the targetItems set")
for k, item := range targetsDiffEmptyCollectorSet.Additions() {
// Do nothing if the item is already there
if _, ok := allocator.targetItems[k]; ok {
continue
} else {
// Add item to item pool
allocator.targetItems[k] = item
}
}
}

// Check for deletions
if len(targetsDiffEmptyCollectorSet.Removals()) > 0 {
allocator.log.Info("Targets removed, removing targets from the targetItems set")
for k := range targetsDiffEmptyCollectorSet.Removals() {
// Delete item from target items
delete(allocator.targetItems, k)
}
}
}
return
}
// Check for target changes
targetsDiff := diff.Maps(allocator.targetItems, targets)
// If there are any additions or removals
if len(targetsDiff.Additions()) != 0 || len(targetsDiff.Removals()) != 0 {
allocator.handleTargets(targetsDiff)
}
}

// handleTargets receives the new and removed targets and reconciles the current state.
// Any removals are removed from the allocator's targetItems and unassigned from the corresponding collector.
// Any net-new additions are assigned to the collector on the same node as the target.
func (allocator *perNodeAllocator) handleTargets(diff diff.Changes[*target.Item]) {
// Check for removals
for k, item := range allocator.targetItems {
// if the current item is in the removals list
if _, ok := diff.Removals()[k]; ok {
c, ok := allocator.collectors[item.CollectorName]
if !ok {
continue
}
c.NumTargets--
delete(allocator.targetItems, k)
delete(allocator.targetItemsPerJobPerCollector[item.CollectorName][item.JobName], item.Hash())
TargetsPerCollector.WithLabelValues(item.CollectorName, perNodeStrategyName).Set(float64(c.NumTargets))
}
}

// Check for additions
for k, item := range diff.Additions() {
// Do nothing if the item is already there
if _, ok := allocator.targetItems[k]; ok {
continue
} else {
// Add item to item pool and assign a collector
allocator.addTargetToTargetItems(item)
}
}
}

// addTargetToTargetItems assigns a target to the collector and adds it to the allocator's targetItems
// This method is called from within SetTargets and SetCollectors, which acquire the needed lock.
// This is only called after the collectors are cleared or when a new target has been found in the tempTargetMap.
// INVARIANT: allocator.collectors must have at least 1 collector set.
// NOTE: by not creating a new target item, there is the potential for a race condition where we modify this target
// item while it's being encoded by the server JSON handler.
// Also, any targets that cannot be assigned to a collector due to no matching node name will be dropped.
func (allocator *perNodeAllocator) addTargetToTargetItems(tg *target.Item) {
chosenCollector := allocator.findCollector(tg.Labels)
if chosenCollector == nil {
allocator.log.V(2).Info("Couldn't find a collector for the target item", "item", tg, "collectors", allocator.collectors)
return
}
tg.CollectorName = chosenCollector.Name
allocator.targetItems[tg.Hash()] = tg
allocator.addCollectorTargetItemMapping(tg)
chosenCollector.NumTargets++
TargetsPerCollector.WithLabelValues(chosenCollector.Name, perNodeStrategyName).Set(float64(chosenCollector.NumTargets))
}

// findCollector finds the collector that matches the node of the target, on the basis of the
// pod node label.
// This method is called from within SetTargets and SetCollectors, whose caller
// acquires the needed lock. This method assumes there is at least 1 collector set.
func (allocator *perNodeAllocator) findCollector(labels model.LabelSet) *Collector {
var col *Collector
for _, v := range allocator.collectors {
// Try to match against a node label.
for _, l := range nodeLabels {
if nodeNameLabelValue, ok := labels[l]; ok {
if v.Node == string(nodeNameLabelValue) {
col = v
break
}
}
}
}

return col
}

// addCollectorTargetItemMapping keeps track of which collector has which jobs and targets.
// This allows the allocator to respond to HTTP calls without any extra allocations. The caller of this method
// has to acquire a lock.
func (allocator *perNodeAllocator) addCollectorTargetItemMapping(tg *target.Item) {
if allocator.targetItemsPerJobPerCollector[tg.CollectorName] == nil {
allocator.targetItemsPerJobPerCollector[tg.CollectorName] = make(map[string]map[string]bool)
}
if allocator.targetItemsPerJobPerCollector[tg.CollectorName][tg.JobName] == nil {
allocator.targetItemsPerJobPerCollector[tg.CollectorName][tg.JobName] = make(map[string]bool)
}
allocator.targetItemsPerJobPerCollector[tg.CollectorName][tg.JobName][tg.Hash()] = true
}

// TargetItems returns a shallow copy of the targetItems map.
func (allocator *perNodeAllocator) TargetItems() map[string]*target.Item {
allocator.m.RLock()
defer allocator.m.RUnlock()
targetItemsCopy := make(map[string]*target.Item)
for k, v := range allocator.targetItems {
targetItemsCopy[k] = v
}
return targetItemsCopy
}

// Collectors returns a shallow copy of the collectors map.
func (allocator *perNodeAllocator) Collectors() map[string]*Collector {
allocator.m.RLock()
defer allocator.m.RUnlock()
collectorsCopy := make(map[string]*Collector)
for k, v := range allocator.collectors {
collectorsCopy[k] = v
}
return collectorsCopy
}

func (allocator *perNodeAllocator) GetTargetsForCollectorAndJob(collector string, job string) []*target.Item {
allocator.m.RLock()
defer allocator.m.RUnlock()
if _, ok := allocator.targetItemsPerJobPerCollector[collector]; !ok {
return []*target.Item{}
}
if _, ok := allocator.targetItemsPerJobPerCollector[collector][job]; !ok {
return []*target.Item{}
}
targetItemsCopy := make([]*target.Item, len(allocator.targetItemsPerJobPerCollector[collector][job]))
index := 0
for targetHash := range allocator.targetItemsPerJobPerCollector[collector][job] {
targetItemsCopy[index] = allocator.targetItems[targetHash]
index++
}
return targetItemsCopy
}

// SetFilter sets the filtering hook to use.
func (allocator *perNodeAllocator) SetFilter(filter Filter) {
allocator.filter = filter
}

func newPerNodeAllocator(log logr.Logger, opts ...AllocationOption) Allocator {
pnAllocator := &perNodeAllocator{
log: log,
collectors: make(map[string]*Collector),
targetItems: make(map[string]*target.Item),
targetItemsPerJobPerCollector: make(map[string]map[string]map[string]bool),
}

for _, opt := range opts {
opt(pnAllocator)
}

return pnAllocator
}
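
To see how the pieces above fit together, here is a short test-style sketch of the allocator in use. It assumes it lives in the allocation package (newPerNodeAllocator is unexported) and that target.NewItem takes a job name, target URL, label set, and collector name, as used elsewhere in the target allocator tests; it is an illustration, not part of this commit:

package allocation

import (
	"testing"

	"github.com/go-logr/logr"
	"github.com/prometheus/common/model"

	"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/target"
)

func TestPerNodeSketch(t *testing.T) {
	allocator := newPerNodeAllocator(logr.Discard())

	// Two collectors, one per node, as a daemonset would schedule them.
	allocator.SetCollectors(map[string]*Collector{
		"collector-0": {Name: "collector-0", Node: "node-0"},
		"collector-1": {Name: "collector-1", Node: "node-1"},
	})

	// A pod-discovered target carries its node name in the service discovery labels,
	// so findCollector should assign it to the collector running on node-1.
	item := target.NewItem("prometheus", "10.0.0.5:8080", model.LabelSet{
		"__meta_kubernetes_pod_node_name": "node-1",
	}, "")
	allocator.SetTargets(map[string]*target.Item{item.Hash(): item})

	assigned := allocator.GetTargetsForCollectorAndJob("collector-1", "prometheus")
	if len(assigned) != 1 {
		t.Fatalf("expected 1 target on collector-1, got %d", len(assigned))
	}
}

A target whose labels name a node with no matching collector is dropped by addTargetToTargetItems, which is why the strategy is documented as daemonset-only.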