Have rls's LRU Cache rely on cleanup process to remove expired entries (#10400)

* Add test for multiple targets with cache expiration.
larry-safran authored Jul 21, 2023
1 parent 419767f commit afa4d6d
Showing 3 changed files with 60 additions and 16 deletions.
rls/src/main/java/io/grpc/rls/LinkedHashLruCache.java (1 change: 0 additions & 1 deletion)

@@ -179,7 +179,6 @@ private SizedValue readInternal(K key) {
     synchronized (lock) {
       SizedValue existing = delegate.get(key);
       if (existing != null && isExpired(key, existing.value, ticker.read())) {
-        invalidate(key, EvictionType.EXPIRED);
         return null;
       }
       return existing;
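In context: before this change, readInternal evicted an expired entry inline (invalidate with EvictionType.EXPIRED) before reporting a miss; after it, a read of an expired entry simply returns null and eviction is left to the cache's periodic cleanup. The following is a minimal, self-contained sketch of that pattern only; the class and method names (ExpiringCache, cleanupExpired) are illustrative and not the grpc-java LinkedHashLruCache API.

// Sketch: reads report misses, a scheduled cleanup task owns eviction.
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class ExpiringCache<K, V> {
  private static final class Timed<V> {
    final V value;
    final long expiresAtNanos;
    Timed(V value, long expiresAtNanos) {
      this.value = value;
      this.expiresAtNanos = expiresAtNanos;
    }
  }

  private final Object lock = new Object();
  private final LinkedHashMap<K, Timed<V>> delegate = new LinkedHashMap<>();
  private final ScheduledExecutorService cleaner = Executors.newSingleThreadScheduledExecutor();

  ExpiringCache(long cleanupPeriod, TimeUnit unit) {
    cleaner.scheduleAtFixedRate(this::cleanupExpired, cleanupPeriod, cleanupPeriod, unit);
  }

  void put(K key, V value, long ttlNanos) {
    synchronized (lock) {
      delegate.put(key, new Timed<>(value, System.nanoTime() + ttlNanos));
    }
  }

  V read(K key) {
    synchronized (lock) {
      Timed<V> existing = delegate.get(key);
      if (existing != null && System.nanoTime() >= existing.expiresAtNanos) {
        // Expired: report a miss but do NOT evict here; cleanup owns eviction.
        return null;
      }
      return existing == null ? null : existing.value;
    }
  }

  private void cleanupExpired() {
    long now = System.nanoTime();
    synchronized (lock) {
      for (Iterator<Map.Entry<K, Timed<V>>> it = delegate.entrySet().iterator(); it.hasNext(); ) {
        if (now >= it.next().getValue().expiresAtNanos) {
          it.remove();  // the only place expired entries are removed
        }
      }
    }
  }
}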
rls/src/test/java/io/grpc/rls/CachingRlsLbClientTest.java (68 changes: 55 additions & 13 deletions)

@@ -354,12 +354,7 @@ public void get_updatesLbState() throws Exception {
     assertThat(stateCaptor.getAllValues())
         .containsExactly(ConnectivityState.CONNECTING, ConnectivityState.READY);
     Metadata headers = new Metadata();
-    PickResult pickResult = pickerCaptor.getValue().pickSubchannel(
-        new PickSubchannelArgsImpl(
-            TestMethodDescriptors.voidMethod().toBuilder().setFullMethodName("service1/create")
-                .build(),
-            headers,
-            CallOptions.DEFAULT));
+    PickResult pickResult = getPickResultForCreate(pickerCaptor, headers);
     assertThat(pickResult.getStatus().isOk()).isTrue();
     assertThat(pickResult.getSubchannel()).isNotNull();
     assertThat(headers.get(RLS_DATA_KEY)).isEqualTo("header-rls-data-value");
@@ -395,6 +390,57 @@ public void get_updatesLbState() throws Exception {
     assertThat(fakeThrottler.getNumUnthrottled()).isEqualTo(1);
   }

+  @Test
+  public void timeout_not_changing_picked_subchannel() throws Exception {
+    setUpRlsLbClient();
+    RouteLookupRequest routeLookupRequest = RouteLookupRequest.create(ImmutableMap.of(
+        "server", "bigtable.googleapis.com", "service-key", "service1", "method-key", "create"));
+    rlsServerImpl.setLookupTable(
+        ImmutableMap.of(
+            routeLookupRequest,
+            RouteLookupResponse.create(
+                ImmutableList.of("primary.cloudbigtable.googleapis.com", "target2", "target3"),
+                "header-rls-data-value")));
+
+    // valid channel
+    CachedRouteLookupResponse resp = getInSyncContext(routeLookupRequest);
+    assertThat(resp.hasData()).isFalse();
+    fakeClock.forwardTime(SERVER_LATENCY_MILLIS, TimeUnit.MILLISECONDS);
+
+    resp = getInSyncContext(routeLookupRequest);
+    assertThat(resp.hasData()).isTrue();
+
+    ArgumentCaptor<SubchannelPicker> pickerCaptor = ArgumentCaptor.forClass(SubchannelPicker.class);
+    ArgumentCaptor<ConnectivityState> stateCaptor =
+        ArgumentCaptor.forClass(ConnectivityState.class);
+    verify(helper, times(4)).updateBalancingState(stateCaptor.capture(), pickerCaptor.capture());
+
+    Metadata headers = new Metadata();
+    PickResult pickResult = getPickResultForCreate(pickerCaptor, headers);
+    assertThat(pickResult.getStatus().isOk()).isTrue();
+    assertThat(pickResult.getSubchannel().toString())
+        .isEqualTo("primary.cloudbigtable.googleapis.com");
+
+    fakeClock.forwardTime(5, TimeUnit.MINUTES);
+    PickResult pickResult2 = getPickResultForCreate(pickerCaptor, headers);
+    assertThat(pickResult2.getSubchannel()).isNull();
+    fakeClock.forwardTime(SERVER_LATENCY_MILLIS, TimeUnit.MILLISECONDS);
+    PickResult pickResult3 = getPickResultForCreate(pickerCaptor, headers);
+    assertThat(pickResult3.getSubchannel()).isNotNull();
+    assertThat(pickResult3.getSubchannel().toString())
+        .isEqualTo(pickResult.getSubchannel().toString());
+  }
+
+  private static PickResult getPickResultForCreate(ArgumentCaptor<SubchannelPicker> pickerCaptor,
+      Metadata headers) {
+    return pickerCaptor.getValue().pickSubchannel(
+        new PickSubchannelArgsImpl(
+            TestMethodDescriptors.voidMethod().toBuilder().setFullMethodName("service1/create")
+                .build(),
+            headers,
+            CallOptions.DEFAULT));
+  }
+
   @Test
   public void get_withAdaptiveThrottler() throws Exception {
     AdaptiveThrottler adaptiveThrottler =
@@ -440,12 +486,7 @@ public void get_withAdaptiveThrottler() throws Exception {
         .updateBalancingState(stateCaptor.capture(), pickerCaptor.capture());

     Metadata headers = new Metadata();
-    PickResult pickResult = pickerCaptor.getValue().pickSubchannel(
-        new PickSubchannelArgsImpl(
-            TestMethodDescriptors.voidMethod().toBuilder().setFullMethodName("service1/create")
-                .build(),
-            headers,
-            CallOptions.DEFAULT));
+    PickResult pickResult = getPickResultForCreate(pickerCaptor, headers);
     assertThat(pickResult.getSubchannel()).isNotNull();
     assertThat(headers.get(RLS_DATA_KEY)).isEqualTo("header-rls-data-value");

@@ -680,7 +721,8 @@ public PickResult pickSubchannel(PickSubchannelArgs args) {
           new SubchannelPicker() {
             @Override
             public PickResult pickSubchannel(PickSubchannelArgs args) {
-              return PickResult.withSubchannel(mock(Subchannel.class));
+              return PickResult.withSubchannel(
+                  mock(Subchannel.class, config.get("target").toString()));
             }
           });
     }
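The one-line change to the test's fake picker above names each mocked Subchannel after the child policy's "target" config value, which is what lets the new test assert which backend was picked by comparing Subchannel#toString() against "primary.cloudbigtable.googleapis.com". This leans on Mockito's mock(Class, String) overload, where the supplied name becomes the mock's toString(). A standalone illustration follows; the class name and printed value are only examples, not part of the patch.

// Demonstrates that a named Mockito mock reports its name from toString().
import static org.mockito.Mockito.mock;

import io.grpc.LoadBalancer.Subchannel;

public final class NamedMockDemo {
  public static void main(String[] args) {
    Subchannel subchannel = mock(Subchannel.class, "primary.cloudbigtable.googleapis.com");
    System.out.println(subchannel);  // prints: primary.cloudbigtable.googleapis.com
  }
}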
rls/src/test/java/io/grpc/rls/LinkedHashLruCacheTest.java (7 changes: 5 additions & 2 deletions)

@@ -150,13 +150,16 @@ public void eviction_size_shouldEvictAlreadyExpired() {
   }

   @Test
-  public void eviction_get_shouldNotReturnAlreadyExpired() {
+  public void eviction_cleanupShouldRemoveAlreadyExpired() {
     for (int i = 1; i <= MAX_SIZE; i++) {
       // last entry is already expired when added
-      cache.cache(i, new Entry("Entry" + i, ticker.read() + MAX_SIZE - i));
+      cache.cache(i, new Entry("Entry" + i,
+          ticker.read() + ((MAX_SIZE - i) * TimeUnit.MINUTES.toNanos(1)) + 1));
     }

     assertThat(cache.estimatedSize()).isEqualTo(MAX_SIZE);
+
+    fakeClock.forwardTime(1, TimeUnit.MINUTES);
     assertThat(cache.read(MAX_SIZE)).isNull();
     assertThat(cache.estimatedSize()).isEqualTo(MAX_SIZE - 1);
     verify(evictionListener).onEviction(eq(MAX_SIZE), any(Entry.class), eq(EvictionType.EXPIRED));
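The reworked test staggers expirations one minute apart, with the last entry (i == MAX_SIZE) set to expire 1 nanosecond after insertion, so forwarding the fake clock by exactly one minute expires only that entry; the assertions then confirm that read() reports a miss while the cleanup path performs the single EXPIRED eviction. A tiny standalone check of that arithmetic is below; MAX_SIZE is assumed to be 5 here purely for illustration, and the real constant lives in the test.

// Checks that after advancing exactly one minute, only the last entry is past
// its expiration of (MAX_SIZE - i) minutes + 1ns.
import java.util.concurrent.TimeUnit;

public final class ExpirationMathDemo {
  public static void main(String[] args) {
    int maxSize = 5;                          // assumed value for illustration
    long start = 0;                           // fake ticker starting point
    long oneMinute = TimeUnit.MINUTES.toNanos(1);
    long now = start + oneMinute;             // fakeClock.forwardTime(1, MINUTES)
    for (int i = 1; i <= maxSize; i++) {
      long expiresAt = start + ((maxSize - i) * oneMinute) + 1;
      System.out.println("entry " + i + " expired: " + (now >= expiresAt));
    }
    // Prints false for entries 1..4 and true only for entry 5.
  }
}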
