drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree

As we want to be able to run inside atomic context for retiring the
i915_active, and we are no longer allowed to abuse mutex_trylock, split
the tree management portion of i915_active.mutex into an irq-safe
spinlock.

References: a0855d2 ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Fixes: 274cbf2 ("drm/i915: Push the i915_active.retire into a worker")
Signed-off-by: Chris Wilson <[email protected]>
Cc: Tvrtko Ursulin <[email protected]>
Cc: Matthew Auld <[email protected]>
Reviewed-by: Tvrtko Ursulin <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
(cherry picked from commit c9ad602)
Signed-off-by: Joonas Lahtinen <[email protected]>
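
The crux of the change is the final-reference path: retiring may now happen from atomic (fence-signaling) context, so the last decrement and the rbtree teardown move from ref->mutex to an irq-safe spinlock taken via atomic_dec_and_lock_irqsave(). A minimal sketch of that pattern, using the field names from the diff below; the hypothetical sketch_retire() is illustrative scaffolding, not the verbatim driver code:

        /*
         * Illustrative only -- the real logic is in __active_retire() below.
         * atomic_dec_and_lock_irqsave() decrements the count and, only when it
         * reaches zero, returns true with the spinlock held and interrupts
         * disabled, so the final-reference teardown is safe in IRQ context.
         */
        static void sketch_retire(struct i915_active *ref)
        {
                unsigned long flags;

                if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                        return; /* not the last reference */

                /* last reference: detach ref->tree / ref->cache under the lock */

                spin_unlock_irqrestore(&ref->tree_lock, flags);

                /* free the detached nodes outside the lock */
        }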
ickle authored and jlahtine-intel committed Nov 20, 2019
1 parent fa039b9 commit 093b922
Showing 2 changed files with 29 additions and 29 deletions.
57 changes: 28 additions & 29 deletions drivers/gpu/drm/i915/i915_active.c
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)

static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->mutex);
spin_lock_irq(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* before the first inc */
debug_object_activate(ref, &active_debug_desc);
spin_unlock_irq(&ref->tree_lock);
}

static void debug_active_deactivate(struct i915_active *ref)
{
lockdep_assert_held(&ref->mutex);
lockdep_assert_held(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* after the last dec */
debug_object_deactivate(ref, &active_debug_desc);
}
@@ -128,36 +129,34 @@ __active_retire(struct i915_active *ref)
{
struct active_node *it, *n;
struct rb_root root;
bool retire = false;
unsigned long flags;

lockdep_assert_held(&ref->mutex);
GEM_BUG_ON(i915_active_is_idle(ref));

/* return the unused nodes to our slabcache -- flushing the allocator */
if (atomic_dec_and_test(&ref->count)) {
debug_active_deactivate(ref);
root = ref->tree;
ref->tree = RB_ROOT;
ref->cache = NULL;
retire = true;
}

mutex_unlock(&ref->mutex);
if (!retire)
if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
return;

GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
GEM_BUG_ON(i915_active_fence_isset(&it->base));
kmem_cache_free(global.slab_cache, it);
}
debug_active_deactivate(ref);

root = ref->tree;
ref->tree = RB_ROOT;
ref->cache = NULL;

spin_unlock_irqrestore(&ref->tree_lock, flags);

/* After the final retire, the entire struct may be freed */
if (ref->retire)
ref->retire(ref);

/* ... except if you wait on it, you must manage your own references! */
wake_up_var(ref);

rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
GEM_BUG_ON(i915_active_fence_isset(&it->base));
kmem_cache_free(global.slab_cache, it);
}
}
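
Because the rendered diff interleaves the removed and the added lines, the new retire path is easier to read folded together. Reconstructed from the hunk above (an approximation, not a verbatim copy of the resulting file), __active_retire() now reads:

        static void
        __active_retire(struct i915_active *ref)
        {
                struct active_node *it, *n;
                struct rb_root root;
                unsigned long flags;

                GEM_BUG_ON(i915_active_is_idle(ref));

                /* return the unused nodes to our slabcache -- flushing the allocator */
                if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                        return;

                GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
                debug_active_deactivate(ref);

                /* steal the tree while the irq-safe lock is held... */
                root = ref->tree;
                ref->tree = RB_ROOT;
                ref->cache = NULL;

                spin_unlock_irqrestore(&ref->tree_lock, flags);

                /* After the final retire, the entire struct may be freed */
                if (ref->retire)
                        ref->retire(ref);

                /* ... except if you wait on it, you must manage your own references! */
                wake_up_var(ref);

                /* ...and free the stolen nodes outside the lock */
                rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
                        GEM_BUG_ON(i915_active_fence_isset(&it->base));
                        kmem_cache_free(global.slab_cache, it);
                }
        }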

static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
if (atomic_add_unless(&ref->count, -1, 1))
return;

mutex_lock(&ref->mutex);
__active_retire(ref);
}

@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
if (atomic_add_unless(&ref->count, -1, 1))
return;

/* If we are inside interrupt context (fence signaling), defer */
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
!mutex_trylock(&ref->mutex)) {
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
queue_work(system_unbound_wq, &ref->work);
return;
}
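
With the tree protected by a spinlock, the mutex_trylock() escape hatch goes away: only an i915_active whose retire callback may sleep is still deferred to the worker; everything else retires inline, even from IRQ context. Folding the hunk above (the trailing call into __active_retire() sits just below the shown context), the function becomes approximately:

        static void
        active_retire(struct i915_active *ref)
        {
                if (atomic_add_unless(&ref->count, -1, 1))
                        return; /* not the last reference */

                /* a retire callback that may sleep must run from process context */
                if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
                        queue_work(system_unbound_wq, &ref->work);
                        return;
                }

                __active_retire(ref);
        }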
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
if (!prealloc)
return NULL;

mutex_lock(&ref->mutex);
spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));

parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)

out:
ref->cache = node;
mutex_unlock(&ref->mutex);
spin_unlock_irq(&ref->tree_lock);

BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
if (bits & I915_ACTIVE_MAY_SLEEP)
ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
ref->cache = NULL;

init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
__mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
if (RB_EMPTY_ROOT(&ref->tree))
return NULL;

mutex_lock(&ref->mutex);
spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));

/*
@@ -575,15 +573,15 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
goto match;
}

mutex_unlock(&ref->mutex);
spin_unlock_irq(&ref->tree_lock);

return NULL;

match:
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
if (p == &ref->cache->node)
ref->cache = NULL;
mutex_unlock(&ref->mutex);
spin_unlock_irq(&ref->tree_lock);

return rb_entry(p, struct active_node, node);
}
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
unsigned long flags;

GEM_BUG_ON(i915_active_is_idle(ref));

@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
mutex_unlock(&ref->mutex);
spin_unlock_irqrestore(&ref->tree_lock, flags);
}

void i915_request_add_active_barriers(struct i915_request *rq)
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/i915_active_types.h
@@ -48,6 +48,7 @@ struct i915_active {
atomic_t count;
struct mutex mutex;

spinlock_t tree_lock;
struct active_node *cache;
struct rb_root tree;

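For orientation, the struct now carries both serialisation primitives: the pre-existing mutex for the sleeping paths, and the new irq-safe tree_lock guarding the rbtree of nodes and the cache pointer, per the commit message. A trimmed view, with the fields outside the hunk elided:

        struct i915_active {
                atomic_t count;
                struct mutex mutex;

                /* irq-safe lock for the rbtree of active_nodes and ref->cache */
                spinlock_t tree_lock;
                struct active_node *cache;
                struct rb_root tree;

                /* ...remaining fields unchanged by this patch... */
        };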
