Commit 1fb1945

fix(pool): close when last handle is dropped, extra check in try_acquire
closes #1928 closes #2375
1 parent c17c59f

3 files changed: +80 -16

sqlx-core/src/connection.rs

+14 -3

@@ -17,9 +17,20 @@ pub trait Connection: Send {
 
     /// Explicitly close this database connection.
     ///
-    /// This method is **not required** for safe and consistent operation. However, it is
-    /// recommended to call it instead of letting a connection `drop` as the database backend
-    /// will be faster at cleaning up resources.
+    /// This notifies the database server that the connection is closing so that it can
+    /// free up any server-side resources in use.
+    ///
+    /// While connections can simply be dropped to clean up local resources,
+    /// the `Drop` handler itself cannot notify the server that the connection is being closed
+    /// because that may require I/O to send a termination message. That can result in a delay
+    /// before the server learns that the connection is gone, usually from a TCP keepalive timeout.
+    ///
+    /// Creating and dropping many connections in short order without calling `.close()` may
+    /// lead to errors from the database server because those senescent connections will still
+    /// count against any connection limit or quota that is configured.
+    ///
+    /// Therefore it is recommended to call `.close()` on a connection when you are done using it
+    /// and to `.await` the result to ensure the termination message is sent.
     fn close(self) -> BoxFuture<'static, Result<(), Error>>;
 
     /// Immediately close the connection without sending a graceful shutdown.
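
The usage this new documentation recommends looks roughly like the sketch below. It is not part of the commit and assumes the Postgres driver with a placeholder connection string.

// Sketch only; `example` and the connection string are illustrative.
use sqlx::{Connection, PgConnection};

async fn example() -> Result<(), sqlx::Error> {
    let mut conn = PgConnection::connect("postgres://localhost/mydb").await?;

    let row: (i64,) = sqlx::query_as("SELECT $1")
        .bind(42_i64)
        .fetch_one(&mut conn)
        .await?;
    assert_eq!(row.0, 42);

    // `.close()` sends the termination message, and `.await`ing the result
    // ensures it actually goes out; `Drop` cannot do this async I/O.
    conn.close().await?;
    Ok(())
}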

sqlx-core/src/pool/inner.rs

+45 -13

@@ -81,9 +81,13 @@ impl<DB: Database> PoolInner<DB> {
         self.is_closed.load(Ordering::Acquire)
     }
 
-    pub(super) fn close<'a>(self: &'a Arc<Self>) -> impl Future<Output = ()> + 'a {
+    fn mark_closed(&self) {
         self.is_closed.store(true, Ordering::Release);
         self.on_closed.notify(usize::MAX);
+    }
+
+    pub(super) fn close<'a>(self: &'a Arc<Self>) -> impl Future<Output = ()> + 'a {
+        self.mark_closed();
 
         async move {
             for permits in 1..=self.options.max_connections {
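
`mark_closed()` is factored out of `close()` so the `Drop` impl later in this diff can also call it: `Drop` cannot `.await`, but it can still flip the flag and wake waiters synchronously. Below is a standalone sketch of that pattern; the `Inner` type is illustrative, and `event_listener::Event` is assumed as a stand-in for the pool's `on_closed` event.

// Standalone sketch; names are illustrative, not sqlx internals.
use std::sync::atomic::{AtomicBool, Ordering};

use event_listener::Event;

struct Inner {
    is_closed: AtomicBool,
    on_closed: Event,
}

impl Inner {
    // Synchronous, so it is callable from both `close()` and `Drop`.
    fn mark_closed(&self) {
        self.is_closed.store(true, Ordering::Release);
        // Wake every task currently waiting for the close event.
        self.on_closed.notify(usize::MAX);
    }
}

impl Drop for Inner {
    fn drop(&mut self) {
        // `Drop` cannot `.await`, but it can still flip the flag and
        // wake waiters before the memory goes away.
        self.mark_closed();
    }
}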
@@ -209,19 +213,25 @@ impl<DB: Database> PoolInner<DB> {
     }
 
     /// Try to atomically increment the pool size for a new connection.
+    ///
+    /// Returns `Err` if the pool is at max capacity already or is closed.
     pub(super) fn try_increment_size<'a>(
         self: &'a Arc<Self>,
         permit: AsyncSemaphoreReleaser<'a>,
     ) -> Result<DecrementSizeGuard<DB>, AsyncSemaphoreReleaser<'a>> {
         match self
             .size
             .fetch_update(Ordering::AcqRel, Ordering::Acquire, |size| {
+                if self.is_closed() {
+                    return None;
+                }
+
                 size.checked_add(1)
                     .filter(|size| size <= &self.options.max_connections)
             }) {
             // we successfully incremented the size
             Ok(_) => Ok(DecrementSizeGuard::from_permit((*self).clone(), permit)),
-            // the pool is at max capacity
+            // the pool is at max capacity or is closed
             Err(_) => Err(permit),
         }
     }
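
The closed check rides on `fetch_update`, whose closure returning `None` aborts the update and makes the whole call return `Err` with the current value. A minimal standalone sketch of the same technique (`MAX`, `size`, and `closed` are illustrative names, not sqlx's actual fields):

use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};

const MAX: u32 = 10;

// Returns `Ok(previous)` on success; `Err(current)` if closed or at MAX.
fn try_increment(size: &AtomicU32, closed: &AtomicBool) -> Result<u32, u32> {
    size.fetch_update(Ordering::AcqRel, Ordering::Acquire, |size| {
        // Returning `None` from the closure aborts the update, which is
        // how the closed check added in this commit turns into an `Err`.
        if closed.load(Ordering::Acquire) {
            return None;
        }

        size.checked_add(1).filter(|&new| new <= MAX)
    })
}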
@@ -258,7 +268,9 @@ impl<DB: Database> PoolInner<DB> {
             // we can open a new connection
             guard
         } else {
-            // This can happen for a child pool that's at its connection limit.
+            // This can happen for a child pool that's at its connection limit,
+            // or if the pool was closed between `acquire_permit()` and
+            // `try_increment_size()`.
             tracing::debug!("woke but was unable to acquire idle connection or open new one; retrying");
             // If so, we're likely in the current-thread runtime if it's Tokio
             // and so we should yield to let any spawned release_to_pool() tasks
@@ -395,6 +407,8 @@ impl<DB: Database> PoolInner<DB> {
 
 impl<DB: Database> Drop for PoolInner<DB> {
     fn drop(&mut self) {
+        self.mark_closed();
+
         if let Some(parent) = &self.options.parent_pool {
             // Release the stolen permits.
             parent.0.semaphore.release(self.semaphore.permits());
@@ -461,7 +475,9 @@ async fn check_idle_conn<DB: Database>(
 }
 
 fn spawn_maintenance_tasks<DB: Database>(pool: &Arc<PoolInner<DB>>) {
-    let pool = Arc::clone(&pool);
+    // NOTE: use `pool_weak` for the maintenance tasks so
+    // they don't keep `PoolInner` from being dropped.
+    let pool_weak = Arc::downgrade(&pool);
 
     let period = match (pool.options.max_lifetime, pool.options.idle_timeout) {
         (Some(it), None) | (None, Some(it)) => it,
@@ -471,35 +487,51 @@ fn spawn_maintenance_tasks<DB: Database>(pool: &Arc<PoolInner<DB>>) {
         (None, None) => {
             if pool.options.min_connections > 0 {
                 crate::rt::spawn(async move {
-                    pool.min_connections_maintenance(None).await;
+                    if let Some(pool) = pool_weak.upgrade() {
+                        pool.min_connections_maintenance(None).await;
+                    }
                 });
             }
 
             return;
         }
     };
 
+    // Immediately cancel this task if the pool is closed.
+    let mut close_event = pool.close_event();
+
     crate::rt::spawn(async move {
-        // Immediately cancel this task if the pool is closed.
-        let _ = pool
-            .close_event()
+        let _ = close_event
             .do_until(async {
-                while !pool.is_closed() {
+                let mut slept = true;
+
+                // If the last handle to the pool was dropped while we were sleeping
+                while let Some(pool) = pool_weak.upgrade() {
+                    if pool.is_closed() {
+                        return;
+                    }
+
+                    // Don't run the reaper right away.
+                    if slept && !pool.idle_conns.is_empty() {
+                        do_reap(&pool).await;
+                    }
+
                     let next_run = Instant::now() + period;
 
                     pool.min_connections_maintenance(Some(next_run)).await;
 
+                    // Don't hold a reference to the pool while sleeping.
+                    drop(pool);
+
                     if let Some(duration) = next_run.checked_duration_since(Instant::now()) {
                         // `async-std` doesn't have a `sleep_until()`
                         crate::rt::sleep(duration).await;
                     } else {
+                        // `next_run` is in the past, just yield.
                         crate::rt::yield_now().await;
                     }
 
-                    // Don't run the reaper right away.
-                    if !pool.idle_conns.is_empty() {
-                        do_reap(&pool).await;
-                    }
+                    slept = true;
                 }
             })
             .await;
sqlx-core/src/pool/mod.rs

+21
@@ -142,6 +142,27 @@ pub use self::maybe::MaybePoolConnection;
 ///
 /// [web::Data]: https://docs.rs/actix-web/3/actix_web/web/struct.Data.html
 ///
+/// ### Note: Drop Behavior
+/// Due to a lack of async `Drop`, dropping the last `Pool` handle may not immediately clean
+/// up connections by itself. The connections will be dropped locally, which is sufficient for
+/// SQLite, but for client/server databases like MySQL and Postgres, that only closes the
+/// client side of the connection. The server will not know the connection is closed until
+/// potentially much later: this is usually dictated by the TCP keepalive timeout in the server
+/// settings.
+///
+/// Because the connection may not be cleaned up immediately on the server side, you may run
+/// into errors regarding connection limits if you are creating and dropping many pools in short
+/// order.
+///
+/// We recommend calling [`.close().await`] to gracefully close the pool and its connections
+/// when you are done using it. This will also wake any tasks that are waiting on an `.acquire()`
+/// call, so for long-lived applications it's a good idea to call `.close()` during shutdown.
+///
+/// If you're writing tests, consider using `#[sqlx::test]` which handles the lifetime of
+/// the pool for you.
+///
+/// [`.close().await`]: Pool::close
+///
 /// ### Why Use a Pool?
 ///
 /// A single database connection (in general) cannot be used by multiple threads simultaneously
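
A sketch of the shutdown pattern this documentation recommends; it is not part of the commit and assumes the Postgres driver with a placeholder connection string.

use sqlx::postgres::PgPoolOptions;

async fn run() -> Result<(), sqlx::Error> {
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect("postgres://localhost/mydb")
        .await?;

    // ... application work borrowing `&pool` ...

    // Gracefully close every connection and wake any pending `.acquire()`
    // calls, so the server frees resources now instead of waiting for a
    // TCP keepalive timeout.
    pool.close().await;
    Ok(())
}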
