Avoid panic when using dedicated client after being recycled, return an error instead #593

Merged Jul 26, 2024 (3 commits)

Changes from 1 commit
23 changes: 17 additions & 6 deletions client.go

@@ -177,7 +177,9 @@ func (c *dedicatedSingleClient) B() Builder {
 
 func (c *dedicatedSingleClient) Do(ctx context.Context, cmd Completed) (resp RedisResult) {
 retry:
-	c.check()
+	if err := c.check(); err != nil {
+		return newErrResult(err)
+	}
 	resp = c.wire.Do(ctx, cmd)
 	if c.retry && cmd.IsReadOnly() && isRetryable(resp.NonRedisError(), c.wire, ctx) {
 		goto retry
@@ -197,7 +199,9 @@ func (c *dedicatedSingleClient) DoMulti(ctx context.Context, multi ...Completed)
 		retryable = allReadOnly(multi)
 	}
 retry:
-	c.check()
+	if err := c.check(); err != nil {
+		return fillErrs(len(multi), err)
+	}
 	resp = c.wire.DoMulti(ctx, multi...).s
 	if retryable && anyRetryable(resp, c.wire, ctx) {
 		goto retry
@@ -212,7 +216,9 @@ retry:
 
 func (c *dedicatedSingleClient) Receive(ctx context.Context, subscribe Completed, fn func(msg PubSubMessage)) (err error) {
 retry:
-	c.check()
+	if err := c.check(); err != nil {
+		return err
+	}
 	err = c.wire.Receive(ctx, subscribe, fn)
 	if c.retry {
 		if _, ok := err.(*RedisError); !ok && isRetryable(err, c.wire, ctx) {
@@ -226,7 +232,11 @@ retry:
 }
 
 func (c *dedicatedSingleClient) SetPubSubHooks(hooks PubSubHooks) <-chan error {
-	c.check()
+	if err := c.check(); err != nil {
+		ch := make(chan error, 1)
+		ch <- err
+		return ch
+	}
 	return c.wire.SetPubSubHooks(hooks)
 }
 
@@ -235,10 +245,11 @@ func (c *dedicatedSingleClient) Close() {
 	c.release()
 }
 
-func (c *dedicatedSingleClient) check() {
+func (c *dedicatedSingleClient) check() error {
 	if atomic.LoadUint32(&c.mark) != 0 {
-		panic(dedicatedClientUsedAfterReleased)
+		return ErrClosing
 	}
+	return nil
 }
 
 func (c *dedicatedSingleClient) release() {
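To illustrate the behavioral change in this commit, here is a minimal, hypothetical caller-side sketch (not part of the diff). It assumes the standard rueidis entry points NewClient and Dedicate, and the error identity is ErrClosing as of this commit; a later commit in this PR renames it (see the review discussion below).

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/redis/rueidis"
)

func main() {
	client, err := rueidis.NewClient(rueidis.ClientOption{
		InitAddress: []string{"127.0.0.1:6379"}, // hypothetical address
	})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	dc, cancel := client.Dedicate()
	cancel() // recycle the dedicated connection back into the pool

	// Before this PR, the next call panicked with
	// "DedicatedClient should not be used after recycled".
	// Now it returns an error result instead.
	resp := dc.Do(context.Background(), dc.B().Get().Key("k").Build())
	if errors.Is(resp.Error(), rueidis.ErrClosing) {
		fmt.Println("dedicated client already recycled:", resp.Error())
	}
}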
24 changes: 13 additions & 11 deletions client_test.go

@@ -548,10 +548,10 @@ func TestSingleClient(t *testing.T) {
 		}
 	})
 
-	t.Run("Dedicate panic after released", func(t *testing.T) {
+	t.Run("Dedicate ErrClosing after released", func(t *testing.T) {
 		m.AcquireFn = func() wire { return &mockWire{} }
-		check := func() {
-			if err := recover(); err != dedicatedClientUsedAfterReleased {
+		check := func(err error) {
+			if !errors.Is(err, ErrClosing) {
 				t.Fatalf("unexpected err %v", err)
 			}
 		}
@@ -567,20 +567,22 @@ func TestSingleClient(t *testing.T) {
 		closeFn(c, cancel)
 		for _, fn := range []func(){
 			func() {
-				defer check()
-				c.Do(context.Background(), c.B().Get().Key("k").Build())
+				resp := c.Do(context.Background(), c.B().Get().Key("k").Build())
+				check(resp.Error())
 			},
 			func() {
-				defer check()
-				c.DoMulti(context.Background(), c.B().Get().Key("k").Build())
+				resp := c.DoMulti(context.Background(), c.B().Get().Key("k").Build())
+				for _, r := range resp {
+					check(r.Error())
+				}
 			},
 			func() {
-				defer check()
-				c.Receive(context.Background(), c.B().Subscribe().Channel("k").Build(), func(msg PubSubMessage) {})
+				err := c.Receive(context.Background(), c.B().Subscribe().Channel("k").Build(), func(msg PubSubMessage) {})
+				check(err)
 			},
 			func() {
-				defer check()
-				c.SetPubSubHooks(PubSubHooks{})
+				ch := c.SetPubSubHooks(PubSubHooks{})
+				check(<-ch)
 			},
 		} {
 			fn()
6 changes: 4 additions & 2 deletions cluster.go

@@ -1111,7 +1111,7 @@ func (c *dedicatedClusterClient) acquire(ctx context.Context, slot uint16) (wire
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.mark {
-		panic(dedicatedClientUsedAfterReleased)
+		return nil, ErrClosing
 	}
 	if c.slot == cmds.NoSlot {
 		c.slot = slot
@@ -1241,7 +1241,9 @@ func (c *dedicatedClusterClient) SetPubSubHooks(hooks PubSubHooks) <-chan error
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	if c.mark {
-		panic(dedicatedClientUsedAfterReleased)
+		ch := make(chan error, 1)
+		ch <- ErrClosing
+		return ch
 	}
 	if p := c.pshks; p != nil {
 		c.pshks = nil
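A note on the SetPubSubHooks change in both files: since the method's contract is to return an error channel rather than an error value, the recycled case is reported by handing back a buffered channel of size one that already contains the error, so the send never blocks and the caller's single receive picks it up. A hypothetical caller-side sketch (dc and the surrounding setup are illustrative, as in the earlier example):

ch := dc.SetPubSubHooks(rueidis.PubSubHooks{})
if err := <-ch; err != nil {
	// ErrClosing as of this commit
	fmt.Println("hooks were not installed:", err)
}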
24 changes: 13 additions & 11 deletions cluster_test.go

@@ -1520,9 +1520,9 @@ func TestClusterClient(t *testing.T) {
 		}
 	})
 
-	t.Run("Dedicate panic after released", func(t *testing.T) {
-		check := func() {
-			if err := recover(); err != dedicatedClientUsedAfterReleased {
+	t.Run("Dedicate ErrClosing after released", func(t *testing.T) {
+		check := func(err error) {
+			if !errors.Is(err, ErrClosing) {
 				t.Fatalf("unexpected err %v", err)
 			}
 		}
@@ -1538,20 +1538,22 @@ func TestClusterClient(t *testing.T) {
 		closeFn(c, cancel)
 		for _, fn := range []func(){
 			func() {
-				defer check()
-				c.Do(context.Background(), c.B().Get().Key("k").Build())
+				resp := c.Do(context.Background(), c.B().Get().Key("k").Build())
+				check(resp.Error())
 			},
 			func() {
-				defer check()
-				c.DoMulti(context.Background(), c.B().Get().Key("k").Build())
+				resp := c.DoMulti(context.Background(), c.B().Get().Key("k").Build())
+				for _, r := range resp {
+					check(r.Error())
+				}
 			},
 			func() {
-				defer check()
-				c.Receive(context.Background(), c.B().Subscribe().Channel("k").Build(), func(msg PubSubMessage) {})
+				err := c.Receive(context.Background(), c.B().Subscribe().Channel("k").Build(), func(msg PubSubMessage) {})
+				check(err)
 			},
 			func() {
-				defer check()
-				c.SetPubSubHooks(PubSubHooks{})
+				ch := c.SetPubSubHooks(PubSubHooks{})
+				check(<-ch)
 			},
 		} {
 			fn()
1 change: 0 additions & 1 deletion rueidis.go

@@ -392,4 +392,3 @@ func dial(dst string, opt *ClientOption) (conn net.Conn, err error) {
 }
 
 const redisErrMsgCommandNotAllow = "command is not allowed"
-const dedicatedClientUsedAfterReleased = "DedicatedClient should not be used after recycled"
rueian (Collaborator):
Hi @FZambia,

Many thanks! I originally planned to take care of this issue over the coming weekend.

The PR looks great and works great. We should indeed replace all the panics caused by c.check(). ErrClosing can work; however, to avoid further confusion, I think we had better use a new error that keeps the original message, for example: ErrDedicatedClientRecycled = errors.New("dedicated client should not be used after recycled").
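For reference, the suggested declaration would read as follows (a sketch, using the exact message proposed above):

// ErrDedicatedClientRecycled is returned when a DedicatedClient is used
// after being recycled.
var ErrDedicatedClientRecycled = errors.New("dedicated client should not be used after recycled")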

FZambia (Contributor Author):

Yep, will update the PR. That was my first thought too, but then I found ErrClosing and decided to reuse the existing error. BTW, how about changing "should" to "must" in the error message?

rueian (Collaborator):

Sounds good to me too. Just curious why you think "must" is better.

FZambia (Contributor Author):

I was thinking in terms of RFC 2119 (https://www.ietf.org/rfc/rfc2119.txt). But thinking about it more, given my personal use case, it seems "should" can fit here too. Let's go with "should".

FZambia (Contributor Author), Jul 26, 2024:

@rueian, so I started implementing an error as you suggested, and realized that I don't clearly see a semantic difference between ErrClosing and ErrDedicatedClientRecycled. Why do you think a separate error better avoids the confusion, and how does it differ from ErrClosing?

From a library user's perspective, I think ErrClosing clearly states that the client was used after closing, while ErrDedicatedClientRecycled exposes implementation details to the user and may eventually be more confusing to read. WDYT?

If ErrClosing does not semantically fit, ErrClosed would probably be much clearer here than ErrDedicatedClientRecycled.

rueian (Collaborator):

The message of ErrClosing is "rueidis client is closing or unable to connect redis". I think it conveys too much misleading information in this case.

For example, when a user hits the c.check() and sees that message, they may wonder:

  1. "I didn't call Close() on my client. Who closed my client?" But it was actually their own cancel() call.
  2. "Unable to connect redis? Was there a networking issue?" But again, it was actually their own cancel() call.

That is how ErrDedicatedClientRecycled differs from ErrClosing: it serves the c.check() only.

FZambia (Contributor Author):

Thanks for the answer. It's clearer to me now: it's an error specific to the state of dedicated clients only. I just pushed the updated version; please check it out.
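Based on this discussion, the updated commits would presumably swap ErrClosing for the dedicated error in each c.check() call site. A sketch of the single-client variant under that assumption:

func (c *dedicatedSingleClient) check() error {
	if atomic.LoadUint32(&c.mark) != 0 {
		return ErrDedicatedClientRecycled
	}
	return nil
}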