Flags for varying NIM seed and temperature every call #808

Merged: 8 commits, Jul 30, 2024
11 changes: 11 additions & 0 deletions garak/generators/nim.py
@@ -3,10 +3,12 @@

 """NVIDIA Inference Microservice LLM interface"""

+import random
 from typing import List, Union

 import openai

+from garak import _config
 from garak.generators.openai import OpenAICompatible


@@ -38,6 +40,8 @@ class NVOpenAIChat(OpenAICompatible):
         "top_p": 0.7,
         "top_k": 0, # top_k is hard set to zero as of 24.04.30
         "uri": "https://integrate.api.nvidia.com/v1/",
+        "vary_seed_each_call": True, # encourage variation when generations>1. not respected by all NIMs
+        "vary_temp_each_call": True, # encourage variation when generations>1. not respected by all NIMs
         "suppressed_params": {"n", "frequency_penalty", "presence_penalty"},
     }
     active = True
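
The two new DEFAULT_PARAMS entries surface as generator attributes (the _call_model change below reads self.vary_seed_each_call and self.vary_temp_each_call), so the behaviour can be switched off per instance. A minimal usage sketch, assuming the generator is constructed with a model name and a NIM API key is present in the environment; the model name is illustrative only:

# Hypothetical usage sketch: turn off per-call variation on one generator
# instance. Assumes DEFAULT_PARAMS entries are exposed as attributes, as the
# _call_model changes below imply; the model name is only an example.
from garak.generators.nim import NVOpenAIChat

gen = NVOpenAIChat(name="meta/llama3-8b-instruct")  # expects a NIM API key in the env
gen.vary_seed_each_call = False  # keep the run-level seed stable across calls
gen.vary_temp_each_call = False  # keep the configured temperature stable too
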
@@ -67,6 +71,13 @@ def _call_model(
         assert (
             generations_this_call == 1
         ), "generations_per_call / n > 1 is not supported"
+
+        if self.vary_seed_each_call:
+            _config.run.seed = random.randint(0, 65535)
+
+        if self.vary_temp_each_call:
+            self.temperature = random.random()
+
         return super()._call_model(prompt, generations_this_call)


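With both flags on by default, each individual request gets a fresh seed and a fresh sampling temperature. A standalone sketch of that behaviour, using stand-in classes rather than garak's real Generator and _config plumbing:

# Standalone sketch of the per-call variation above; RunConfig and
# VaryingGenerator are stand-ins, not garak classes.
import random

class RunConfig:
    seed = None  # plays the role of _config.run.seed

class VaryingGenerator:
    vary_seed_each_call = True  # re-roll the seed before every request
    vary_temp_each_call = True  # re-roll the sampling temperature as well
    temperature = 0.2

    def _call_model(self, prompt: str):
        if self.vary_seed_each_call:
            RunConfig.seed = random.randint(0, 65535)
        if self.vary_temp_each_call:
            self.temperature = random.random()  # uniform on [0.0, 1.0)
        return RunConfig.seed, self.temperature

gen = VaryingGenerator()
print(gen._call_model("hi"))  # e.g. (40213, 0.7421...)
print(gen._call_model("hi"))  # different seed and temperature on each call
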
9 changes: 6 additions & 3 deletions garak/generators/openai.py
@@ -25,9 +25,9 @@
 # lists derived from https://platform.openai.com/docs/models
 chat_models = (
     "gpt-4", # links to latest version
-    "gpt-4-turbo", # links to latest version
-    "gpt-4o", # links to latest version
-    "gpt-4o-mini", # links to latest version
+    "gpt-4-turbo", # links to latest version
+    "gpt-4o", # links to latest version
+    "gpt-4o-mini", # links to latest version
     "gpt-4-turbo-preview",
     "gpt-3.5-turbo", # links to latest version
     "gpt-4-32k",
@@ -185,6 +185,9 @@ def _call_model(
             "stop": self.stop,
         }

+        if _config.run.seed is not None:
+            create_args["seed"] = _config.run.seed
+
         create_args = {
             k: v
             for k, v in create_args.items()
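
On the OpenAI-compatible side, the run-level seed is only added to the request when one is set, and then passes through the same filtering step as the other arguments. A minimal sketch of that flow; the filter condition is an assumption, since the real expression is hidden behind the truncated hunk above:

# Illustrative sketch of building request arguments with an optional seed.
from typing import Optional

def build_create_args(temperature: float, top_p: float,
                      seed: Optional[int], suppressed_params: set) -> dict:
    create_args = {
        "temperature": temperature,
        "top_p": top_p,
        "stop": None,
    }
    if seed is not None:  # mirrors the `_config.run.seed is not None` check
        create_args["seed"] = seed
    # Drop unset values and anything the backend rejects (assumed filter).
    return {
        k: v
        for k, v in create_args.items()
        if v is not None and k not in suppressed_params
    }

print(build_create_args(0.7, 0.7, seed=12345, suppressed_params={"n"}))
# -> {'temperature': 0.7, 'top_p': 0.7, 'seed': 12345}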