support return_all_tokens & stop_seqs #2584

Merged · 6 commits · Jan 8, 2025
Changes from 1 commit
make return_all_tokens work
ming1753 committed Jan 6, 2025
commit cbd77205f399902e4253eb6a945c10f0d881516e
26 changes: 26 additions & 0 deletions llm/server/server/triton_server.py
@@ -98,11 +98,37 @@ def _push_mode_sender_thread(self):
    except Exception as e:
        model_server_logger.error("Unexpected error happened: {}, {}".format(e, str(traceback.format_exc())))

def _cache_special_tokens(self, batch_result):
    for i in range(len(batch_result)):
        is_end = batch_result[i].get("is_end", 0)
        token_ids = batch_result[i]["token_ids"]
        return_all_tokens = batch_result[i].get("return_all_tokens", False)
        # token IDs in [13, 268] are treated as special tokens that should
        # not be streamed out on their own
        cache_special_token = False if is_end == 1 else (13 <= token_ids[0] <= 268)
        if is_end != 1 and (cache_special_token or return_all_tokens or self.cfg.disable_streaming):
            # hold the tokens (and scores) in per-request buffers and strip
            # them from the outgoing chunk
            if batch_result[i]["req_id"] not in self.token_buffer:
                self.token_buffer[batch_result[i]["req_id"]] = list()
                self.score_buffer[batch_result[i]["req_id"]] = list()
            self.token_buffer[batch_result[i]["req_id"]].extend(token_ids)
            self.score_buffer[batch_result[i]["req_id"]].extend(batch_result[i].get("token_scores", []))
            batch_result[i]["token_ids"] = []
            if "token_scores" in batch_result[i]:
                batch_result[i]["token_scores"] = []
        else:
            # release: prepend everything buffered for this request and
            # drop the buffers
            if batch_result[i]["req_id"] in self.token_buffer:
                batch_result[i]["token_ids"] = self.token_buffer[batch_result[i]["req_id"]] + batch_result[i]["token_ids"]
                del self.token_buffer[batch_result[i]["req_id"]]
                if "token_scores" in batch_result[i]:
                    batch_result[i]["token_scores"] = self.score_buffer[batch_result[i]["req_id"]] + batch_result[i]["token_scores"]
                    del self.score_buffer[batch_result[i]["req_id"]]
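In effect, when return_all_tokens is set (or streaming is disabled, or a chunk starts with a special token), tokens are withheld per request and only released in one piece once a non-buffering chunk arrives, typically the final one. Below is a minimal standalone sketch of that behavior, with token scores omitted for brevity; FakeCfg and FakeServer are hypothetical stand-ins for the real server object and its config, not part of the PR:

    class FakeCfg:
        disable_streaming = False

    class FakeServer:
        def __init__(self):
            self.cfg = FakeCfg()
            self.token_buffer = {}

        # trimmed copy of the PR's buffering logic (tokens only)
        def _cache_special_tokens(self, batch_result):
            for r in batch_result:
                is_end = r.get("is_end", 0)
                hold = is_end != 1 and (r.get("return_all_tokens", False)
                                        or self.cfg.disable_streaming
                                        or 13 <= r["token_ids"][0] <= 268)
                if hold:
                    # withhold this chunk's tokens
                    self.token_buffer.setdefault(r["req_id"], []).extend(r["token_ids"])
                    r["token_ids"] = []
                elif r["req_id"] in self.token_buffer:
                    # release everything buffered so far with this chunk
                    r["token_ids"] = self.token_buffer.pop(r["req_id"]) + r["token_ids"]

    server = FakeServer()
    chunk1 = [{"req_id": "A", "token_ids": [1001], "return_all_tokens": True}]
    server._cache_special_tokens(chunk1)
    print(chunk1[0]["token_ids"])   # [] -- withheld

    chunk2 = [{"req_id": "A", "token_ids": [1002], "is_end": 1, "return_all_tokens": True}]
    server._cache_special_tokens(chunk2)
    print(chunk2[0]["token_ids"])   # [1001, 1002] -- full sequence at the end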

def postprocess(self, batch_result, exist_finished_task=False):
    """
    Post-process a single batch of Triton results: buffer or release tokens
    via _cache_special_tokens, then enqueue the batch for the sender thread.
    """
    try:
        self._cache_special_tokens(batch_result)
        self.cached_generated_tokens.put(batch_result)
    except Exception as e:
        model_server_logger.info(
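For context, postprocess now routes every batch through _cache_special_tokens before handing it to the sender thread via cached_generated_tokens. A rough sketch of that hand-off, assuming the queue behaves like a standard queue.Queue (its actual type is not visible in this diff):

    import queue

    cached_generated_tokens = queue.Queue()

    # producer side: postprocess() enqueues the (possibly buffer-merged) batch
    batch_result = [{"req_id": "A", "token_ids": [1001, 1002], "is_end": 1}]
    cached_generated_tokens.put(batch_result)

    # consumer side: the sender thread pops batches and streams them to clients
    batch = cached_generated_tokens.get()
    assert batch[0]["token_ids"] == [1001, 1002]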