Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

KAFKA-9449: Adds support for closing the producer's BufferPool. #7967

Merged
merged 4 commits into from
Jan 17, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.metrics.Metrics;
Expand Down Expand Up @@ -55,6 +56,7 @@ public class BufferPool {
private final Metrics metrics;
private final Time time;
private final Sensor waitTime;
private boolean closed;

/**
* Create a new buffer pool
Expand Down Expand Up @@ -82,6 +84,7 @@ public BufferPool(long memory, int poolableSize, Metrics metrics, Time time, Str
metricGrpName,
"The total time an appender waits for space allocation.");
this.waitTime.add(new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName));
this.closed = false;
}

/**
Expand All @@ -104,6 +107,12 @@ public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedEx

ByteBuffer buffer = null;
this.lock.lock();

if (this.closed) {
this.lock.unlock();
throw new KafkaException("Producer closed while allocating memory");
}

try {
// check if we have a free buffer of the right size pooled
if (size == poolableSize && !this.free.isEmpty())
Expand Down Expand Up @@ -138,6 +147,9 @@ public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedEx
recordWaitTime(timeNs);
}

if (this.closed)
throw new KafkaException("Producer closed while allocating memory");
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@bdbyrne @hachikuji Currently on Producer.send our javadoc mentioned "If a Kafka related error occurs that does not belong to the public API exceptions." for KafkaException and most callers default it to fatal. However if we consider the pattern where thread A blocked on send#bufferPool, and then thread B calls producer.close which would cause thread A to be unblocked by throwing a KafkaException to be a recommended pattern, should we use a different exception than KafkaException to differentiate it from other fatal exceptions?

I'm thinking for Streams if we eventually want to move to this pattern, i.e. the stream thread blocked on producer.send while the closing thread calls producer.close then stream thread would throw KafkaException that in turn would be interpreted as fatal and then the stream thread tries to shutdown itself as "shutdown unclean" whereas here since we are indeed closing we should just proceed with "shutdown clean" --- of course this is still doable with some extra check but I'm wondering if such complexity would be universal for any callers like Streams.


if (waitingTimeElapsed) {
throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms.");
}
Expand Down Expand Up @@ -316,4 +328,19 @@ public long totalMemory() {
Deque<Condition> waiters() {
return this.waiters;
}

/**
 * Closes the buffer pool: any further allocation attempts are rejected while
 * deallocation remains possible, and every thread currently blocked waiting
 * for memory is signalled so it can observe the closed state and abort.
 */
public void close() {
    this.lock.lock();
    try {
        this.closed = true;
        // Wake all blocked allocators; each will re-check the closed flag and abort.
        for (Condition waiter : this.waiters)
            waiter.signal();
    } finally {
        this.lock.unlock();
    }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -802,6 +802,7 @@ public void unmutePartition(TopicPartition tp, long throttleUntilTimeMs) {
*/
public void close() {
    // Mark this accumulator as closed so further appends are rejected.
    this.closed = true;
    // Also close the underlying BufferPool; this wakes any sender threads
    // blocked waiting for memory so they can abort (presumably by throwing
    // KafkaException from allocate() — confirm against BufferPool.close()).
    this.free.close();
}

/*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
*/
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.MockTime;
Expand Down Expand Up @@ -232,7 +233,7 @@ public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception
// both the allocate() called by threads t1 and t2 should have been interrupted and the waiters queue should be empty
assertEquals(pool.queued(), 0);
}

@Test
public void testCleanupMemoryAvailabilityOnMetricsException() throws Exception {
BufferPool bufferPool = spy(new BufferPool(2, 1, new Metrics(), time, metricGroup));
Expand Down Expand Up @@ -377,4 +378,58 @@ public void run() {
}
}

@Test
public void testCloseAllocations() throws Exception {
    BufferPool pool = new BufferPool(10, 1, metrics, Time.SYSTEM, metricGroup);
    // Reserve a buffer before closing so we can verify deallocation afterwards.
    ByteBuffer reserved = pool.allocate(1, maxBlockTimeMs);

    // Once closed, the pool must reject any new allocation with KafkaException.
    pool.close();

    try {
        pool.allocate(1, maxBlockTimeMs);
        fail("Should have thrown KafkaException");
    } catch (KafkaException e) {
        // Expected.
    }

    // Returning previously allocated memory must still succeed after close.
    pool.deallocate(reserved);
}

@Test
public void testCloseNotifyWaiters() throws Exception {
    BufferPool pool = new BufferPool(10, 1, metrics, Time.SYSTEM, metricGroup);
    // Consume the entire pool so the asynchronous allocators below must block.
    ByteBuffer all = pool.allocate(10, Long.MAX_VALUE);

    CountDownLatch first = asyncAllocateClose(pool, 10);
    CountDownLatch second = asyncAllocateClose(pool, 10);

    assertEquals("Allocation shouldn't have happened yet, waiting on memory", 2L, first.getCount() + second.getCount());

    // Closing must signal every waiter so its blocked allocate() aborts promptly.
    pool.close();

    assertTrue("Allocation should fail soon after close", first.await(1, TimeUnit.SECONDS) && second.await(1, TimeUnit.SECONDS));

    pool.deallocate(all);
}

/**
 * Starts a background thread that attempts an allocation which is expected to
 * abort with {@code KafkaException} once the pool is closed.
 *
 * @param pool the buffer pool the background thread allocates from
 * @param size the number of bytes the background thread requests
 * @return a latch that reaches zero only when the allocation aborted with KafkaException
 */
private CountDownLatch asyncAllocateClose(final BufferPool pool, final int size) {
    final CountDownLatch completed = new CountDownLatch(1);
    Thread thread = new Thread() {
        @Override
        public void run() {
            try {
                pool.allocate(size, maxBlockTimeMs);
                fail("Unexpected allocation");
            } catch (KafkaException e) {
                // Expected abort path: close() signalled this waiter.
                completed.countDown();
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of swallowing it, so the
                // interruption remains observable; the latch is deliberately not
                // counted down so the awaiting test still fails.
                Thread.currentThread().interrupt();
                e.printStackTrace();
            }
        }
    };
    thread.start();
    return completed;
}
}