Change our std::recursive_mutex into a recursive spin-lock.
This provides notably better performance since it does not
cause threads to get descheduled behind our backs.
AWoloszyn committed Jan 15, 2019
1 parent 64b7de0 commit 51767ce
Showing 3 changed files with 67 additions and 5 deletions.
61 changes: 61 additions & 0 deletions core/cc/recursive_spinlock.h
@@ -0,0 +1,61 @@
/*
* Copyright (C) 2019 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <atomic>
#include <thread>

#ifndef CORE_RECURSIVE_SPINLOCK_H__
#define CORE_RECURSIVE_SPINLOCK_H__

namespace core {
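// A default-constructed std::thread::id does not identify any thread of
// execution, so it serves as the "unlocked" sentinel value.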
static const std::thread::id kUnlocked;
// RecursiveSpinLock is a spin lock implemented with an atomic variable and
// atomic operations. Multiple calls to Lock from a single thread are valid.
class RecursiveSpinLock {
public:
RecursiveSpinLock() : owning_id_(kUnlocked), count_(0) {}
// Lock acquires the lock.
void Lock() {
static thread_local std::thread::id this_thread =
std::this_thread::get_id();
// If owning_id_ != this_thread, then it can never become this_thread
// behind our backs.
if (owning_id_.load() != this_thread) {
std::thread::id id = kUnlocked;
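// Spin until the lock is free: the CAS succeeds only while owning_id_ still
// holds kUnlocked. On failure, compare_exchange_weak writes the current
// owner into id, so it is reset to kUnlocked before the next attempt.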
while (!owning_id_.compare_exchange_weak(id, this_thread,
std::memory_order_acquire,
std::memory_order_relaxed)) {
id = kUnlocked;
}
}
++count_;
}
// Unlock releases the lock.
void Unlock() {
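// Only release the lock once every recursive Lock on this thread has been
// balanced by an Unlock.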
if (--count_ == 0) {
owning_id_.store(kUnlocked, std::memory_order_release);
}
}

private:
std::atomic<std::thread::id> owning_id_;
// count_ does not have to be atomic, since it is only
// ever modified when locked.
size_t count_;
};
} // namespace core

#endif // CORE_RECURSIVE_SPINLOCK_H__
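As a quick illustration, here is a minimal usage sketch (hypothetical, not part of this commit) showing the recursive behaviour that std::recursive_mutex previously provided: the same thread may call Lock() again before Unlock() without deadlocking.

#include <thread>

#include "core/cc/recursive_spinlock.h"

namespace {
core::RecursiveSpinLock spin_lock;
int counter = 0;

void inner() {
  spin_lock.Lock();  // Re-entrant: this thread already holds the lock.
  ++counter;
  spin_lock.Unlock();
}

void outer() {
  spin_lock.Lock();
  inner();  // Would deadlock on a non-recursive spinlock.
  ++counter;
  spin_lock.Unlock();
}
}  // namespace

int main() {
  std::thread t1(outer);
  std::thread t2(outer);
  t1.join();
  t2.join();
  return counter == 4 ? 0 : 1;
}

Unlike std::recursive_mutex, a contended thread busy-waits instead of being descheduled by the kernel, which is the performance win described in the commit message.
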
4 changes: 2 additions & 2 deletions gapii/cc/spy_base.cpp
@@ -51,9 +51,9 @@ void SpyBase::init(CallObserver* observer) {
mIsSuspended = false;
}

void SpyBase::lock(CallObserver* observer) { mMutex.lock(); }
void SpyBase::lock(CallObserver* observer) { mSpinLock.Lock(); }

void SpyBase::unlock() { mMutex.unlock(); }
void SpyBase::unlock() { mSpinLock.Unlock(); }

void SpyBase::abort() {
GAPID_DEBUG("Command aborted");
7 changes: 4 additions & 3 deletions gapii/cc/spy_base.h
@@ -24,6 +24,7 @@
#include "core/cc/assert.h"
#include "core/cc/id.h"
#include "core/cc/interval_list.h"
#include "core/cc/recursive_spinlock.h"
#include "core/cc/vector.h"

#include "core/memory/arena/cc/arena.h"
@@ -211,9 +212,9 @@ class SpyBase {
std::unordered_map<core::Id, int64_t> mResources;
std::mutex mResourcesMutex;

// The mutex that should be locked for the duration of each of the intercepted
// commands.
std::recursive_mutex mMutex;
// The spinlock that should be locked for the duration of each of the
// intercepted commands.
core::RecursiveSpinLock mSpinLock;

// True if we should observe the application pool.
bool mObserveApplicationPool;
