diff --git a/.stats.yml b/.stats.yml
index 03b0268ffa..c550abf3c6 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 57
+configured_endpoints: 51
diff --git a/README.md b/README.md
index 989f838384..e86ac6553e 100644
--- a/README.md
+++ b/README.md
@@ -296,8 +296,9 @@ from openai import OpenAI
client = OpenAI()
try:
- client.fine_tunes.create(
- training_file="file-XGinujblHPwGLSztz8cPS8XY",
+ client.fine_tuning.jobs.create(
+ model="gpt-3.5-turbo",
+ training_file="file-abc123",
)
except openai.APIConnectionError as e:
print("The server could not be reached")
diff --git a/api.md b/api.md
index 9d9993105b..86b972d14e 100644
--- a/api.md
+++ b/api.md
@@ -50,18 +50,6 @@ Methods:
- client.chat.completions.create(\*\*params) -> ChatCompletion
-# Edits
-
-Types:
-
-```python
-from openai.types import Edit
-```
-
-Methods:
-
-- client.edits.create(\*\*params) -> Edit
-
# Embeddings
Types:
@@ -182,22 +170,6 @@ Methods:
- client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob
- client.fine_tuning.jobs.list_events(fine_tuning_job_id, \*\*params) -> SyncCursorPage[FineTuningJobEvent]
-# FineTunes
-
-Types:
-
-```python
-from openai.types import FineTune, FineTuneEvent, FineTuneEventsListResponse
-```
-
-Methods:
-
-- client.fine_tunes.create(\*\*params) -> FineTune
-- client.fine_tunes.retrieve(fine_tune_id) -> FineTune
-- client.fine_tunes.list() -> SyncPage[FineTune]
-- client.fine_tunes.cancel(fine_tune_id) -> FineTune
-- client.fine_tunes.list_events(fine_tune_id, \*\*params) -> FineTuneEventsListResponse
-
# Beta
## Assistants
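
api.md drops the `Edits` and `FineTunes` sections wholesale; the `fine_tuning.jobs` methods listed above are the surviving surface. A sketch of the job lifecycle under those signatures, assuming the created job carries its ID on `job.id` (`file-abc123` is the README placeholder):

```python
from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    model="gpt-3.5-turbo",
    training_file="file-abc123",
)
events = client.fine_tuning.jobs.list_events(job.id)  # SyncCursorPage[FineTuningJobEvent]
client.fine_tuning.jobs.cancel(job.id)                # -> FineTuningJob
```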
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index ee96f06919..64c93e9449 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -316,12 +316,10 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
beta as beta,
chat as chat,
audio as audio,
- edits as edits,
files as files,
images as images,
models as models,
embeddings as embeddings,
- fine_tunes as fine_tunes,
completions as completions,
fine_tuning as fine_tuning,
moderations as moderations,
diff --git a/src/openai/_client.py b/src/openai/_client.py
index 9eb6888909..09f54e1b12 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -49,7 +49,6 @@
class OpenAI(SyncAPIClient):
completions: resources.Completions
chat: resources.Chat
- edits: resources.Edits
embeddings: resources.Embeddings
files: resources.Files
images: resources.Images
@@ -57,7 +56,6 @@ class OpenAI(SyncAPIClient):
moderations: resources.Moderations
models: resources.Models
fine_tuning: resources.FineTuning
- fine_tunes: resources.FineTunes
beta: resources.Beta
with_raw_response: OpenAIWithRawResponse
@@ -125,7 +123,6 @@ def __init__(
self.completions = resources.Completions(self)
self.chat = resources.Chat(self)
- self.edits = resources.Edits(self)
self.embeddings = resources.Embeddings(self)
self.files = resources.Files(self)
self.images = resources.Images(self)
@@ -133,7 +130,6 @@ def __init__(
self.moderations = resources.Moderations(self)
self.models = resources.Models(self)
self.fine_tuning = resources.FineTuning(self)
- self.fine_tunes = resources.FineTunes(self)
self.beta = resources.Beta(self)
self.with_raw_response = OpenAIWithRawResponse(self)
@@ -249,7 +245,6 @@ def _make_status_error(
class AsyncOpenAI(AsyncAPIClient):
completions: resources.AsyncCompletions
chat: resources.AsyncChat
- edits: resources.AsyncEdits
embeddings: resources.AsyncEmbeddings
files: resources.AsyncFiles
images: resources.AsyncImages
@@ -257,7 +252,6 @@ class AsyncOpenAI(AsyncAPIClient):
moderations: resources.AsyncModerations
models: resources.AsyncModels
fine_tuning: resources.AsyncFineTuning
- fine_tunes: resources.AsyncFineTunes
beta: resources.AsyncBeta
with_raw_response: AsyncOpenAIWithRawResponse
@@ -325,7 +319,6 @@ def __init__(
self.completions = resources.AsyncCompletions(self)
self.chat = resources.AsyncChat(self)
- self.edits = resources.AsyncEdits(self)
self.embeddings = resources.AsyncEmbeddings(self)
self.files = resources.AsyncFiles(self)
self.images = resources.AsyncImages(self)
@@ -333,7 +326,6 @@ def __init__(
self.moderations = resources.AsyncModerations(self)
self.models = resources.AsyncModels(self)
self.fine_tuning = resources.AsyncFineTuning(self)
- self.fine_tunes = resources.AsyncFineTunes(self)
self.beta = resources.AsyncBeta(self)
self.with_raw_response = AsyncOpenAIWithRawResponse(self)
@@ -450,7 +442,6 @@ class OpenAIWithRawResponse:
def __init__(self, client: OpenAI) -> None:
self.completions = resources.CompletionsWithRawResponse(client.completions)
self.chat = resources.ChatWithRawResponse(client.chat)
- self.edits = resources.EditsWithRawResponse(client.edits)
self.embeddings = resources.EmbeddingsWithRawResponse(client.embeddings)
self.files = resources.FilesWithRawResponse(client.files)
self.images = resources.ImagesWithRawResponse(client.images)
@@ -458,7 +449,6 @@ def __init__(self, client: OpenAI) -> None:
self.moderations = resources.ModerationsWithRawResponse(client.moderations)
self.models = resources.ModelsWithRawResponse(client.models)
self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning)
- self.fine_tunes = resources.FineTunesWithRawResponse(client.fine_tunes)
self.beta = resources.BetaWithRawResponse(client.beta)
@@ -466,7 +456,6 @@ class AsyncOpenAIWithRawResponse:
def __init__(self, client: AsyncOpenAI) -> None:
self.completions = resources.AsyncCompletionsWithRawResponse(client.completions)
self.chat = resources.AsyncChatWithRawResponse(client.chat)
- self.edits = resources.AsyncEditsWithRawResponse(client.edits)
self.embeddings = resources.AsyncEmbeddingsWithRawResponse(client.embeddings)
self.files = resources.AsyncFilesWithRawResponse(client.files)
self.images = resources.AsyncImagesWithRawResponse(client.images)
@@ -474,7 +463,6 @@ def __init__(self, client: AsyncOpenAI) -> None:
self.moderations = resources.AsyncModerationsWithRawResponse(client.moderations)
self.models = resources.AsyncModelsWithRawResponse(client.models)
self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning)
- self.fine_tunes = resources.AsyncFineTunesWithRawResponse(client.fine_tunes)
self.beta = resources.AsyncBetaWithRawResponse(client.beta)
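
Both clients and their raw-response wrappers lose the `edits` and `fine_tunes` attributes, so stale call sites now fail at attribute access rather than at request time. A quick illustration, assuming the usual `jobs.retrieve` method and a placeholder `ftjob-abc123` ID:

```python
from openai import OpenAI

client = OpenAI()

# Removed in this change; both lines now raise AttributeError:
# client.fine_tunes.retrieve("ft-abc123")
# client.edits.create(model="text-davinci-edit-001", instruction="...")

# The replacement resource remains, including under with_raw_response:
job = client.fine_tuning.jobs.retrieve("ftjob-abc123")
raw = client.with_raw_response.fine_tuning.jobs.retrieve("ftjob-abc123")
```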
diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py
index fe8e0a2139..d66e137ecd 100644
--- a/src/openai/_module_client.py
+++ b/src/openai/_module_client.py
@@ -18,12 +18,6 @@ def __load__(self) -> resources.Beta:
return _load_client().beta
-class EditsProxy(LazyProxy[resources.Edits]):
- @override
- def __load__(self) -> resources.Edits:
- return _load_client().edits
-
-
class FilesProxy(LazyProxy[resources.Files]):
@override
def __load__(self) -> resources.Files:
@@ -54,12 +48,6 @@ def __load__(self) -> resources.Embeddings:
return _load_client().embeddings
-class FineTunesProxy(LazyProxy[resources.FineTunes]):
- @override
- def __load__(self) -> resources.FineTunes:
- return _load_client().fine_tunes
-
-
class CompletionsProxy(LazyProxy[resources.Completions]):
@override
def __load__(self) -> resources.Completions:
@@ -80,13 +68,11 @@ def __load__(self) -> resources.FineTuning:
chat: resources.Chat = ChatProxy().__as_proxied__()
beta: resources.Beta = BetaProxy().__as_proxied__()
-edits: resources.Edits = EditsProxy().__as_proxied__()
files: resources.Files = FilesProxy().__as_proxied__()
audio: resources.Audio = AudioProxy().__as_proxied__()
images: resources.Images = ImagesProxy().__as_proxied__()
models: resources.Models = ModelsProxy().__as_proxied__()
embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__()
-fine_tunes: resources.FineTunes = FineTunesProxy().__as_proxied__()
completions: resources.Completions = CompletionsProxy().__as_proxied__()
moderations: resources.Moderations = ModerationsProxy().__as_proxied__()
fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__()
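
_module_client.py mirrors the client change for the module-level API: the `EditsProxy` and `FineTunesProxy` lazy proxies disappear along with their exported attributes. The remaining proxies still defer client construction until first use, roughly like this (auth via the `OPENAI_API_KEY` environment variable is assumed):

```python
import openai

# No client is built at import time; the proxy's __load__ runs on first
# access and delegates to a lazily constructed default client.
models = openai.models.list()

# openai.edits / openai.fine_tunes no longer exist in the module namespace,
# so old code fails with AttributeError at the access site.
```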
diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py
index 2cdbeb6ae1..8219be12e6 100644
--- a/src/openai/resources/__init__.py
+++ b/src/openai/resources/__init__.py
@@ -3,12 +3,10 @@
from .beta import Beta, AsyncBeta, BetaWithRawResponse, AsyncBetaWithRawResponse
from .chat import Chat, AsyncChat, ChatWithRawResponse, AsyncChatWithRawResponse
from .audio import Audio, AsyncAudio, AudioWithRawResponse, AsyncAudioWithRawResponse
-from .edits import Edits, AsyncEdits, EditsWithRawResponse, AsyncEditsWithRawResponse
from .files import Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse
from .images import Images, AsyncImages, ImagesWithRawResponse, AsyncImagesWithRawResponse
from .models import Models, AsyncModels, ModelsWithRawResponse, AsyncModelsWithRawResponse
from .embeddings import Embeddings, AsyncEmbeddings, EmbeddingsWithRawResponse, AsyncEmbeddingsWithRawResponse
-from .fine_tunes import FineTunes, AsyncFineTunes, FineTunesWithRawResponse, AsyncFineTunesWithRawResponse
from .completions import Completions, AsyncCompletions, CompletionsWithRawResponse, AsyncCompletionsWithRawResponse
from .fine_tuning import FineTuning, AsyncFineTuning, FineTuningWithRawResponse, AsyncFineTuningWithRawResponse
from .moderations import Moderations, AsyncModerations, ModerationsWithRawResponse, AsyncModerationsWithRawResponse
@@ -22,10 +20,6 @@
"AsyncChat",
"ChatWithRawResponse",
"AsyncChatWithRawResponse",
- "Edits",
- "AsyncEdits",
- "EditsWithRawResponse",
- "AsyncEditsWithRawResponse",
"Embeddings",
"AsyncEmbeddings",
"EmbeddingsWithRawResponse",
@@ -54,10 +48,6 @@
"AsyncFineTuning",
"FineTuningWithRawResponse",
"AsyncFineTuningWithRawResponse",
- "FineTunes",
- "AsyncFineTunes",
- "FineTunesWithRawResponse",
- "AsyncFineTunesWithRawResponse",
"Beta",
"AsyncBeta",
"BetaWithRawResponse",
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index b047c1d2a0..fa096784d2 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -185,7 +185,7 @@ def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
@@ -371,7 +371,7 @@ def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
@@ -557,7 +557,7 @@ def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
@@ -833,7 +833,7 @@ async def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
@@ -1019,7 +1019,7 @@ async def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
@@ -1205,7 +1205,7 @@ async def create(
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
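
The chat docstring fix is a one-character JSON repair: `{"type: "function", ...}` was missing the closing quote on `type`. The corrected literal is the payload for forcing a particular tool call; a minimal sketch, where `my_function` and its empty schema are placeholders:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Call the function."}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "my_function",  # placeholder name from the docstring
                "parameters": {"type": "object", "properties": {}},
            },
        }
    ],
    # The corrected literal: forces the model to call my_function.
    tool_choice={"type": "function", "function": {"name": "my_function"}},
)
```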
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index d3e7c54b11..87dd090052 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -30,21 +30,7 @@ def with_raw_response(self) -> CompletionsWithRawResponse:
def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
echo: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -107,12 +93,11 @@ def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -193,21 +178,7 @@ def create(
def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
stream: Literal[True],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
@@ -277,12 +248,11 @@ def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -356,21 +326,7 @@ def create(
def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
stream: bool,
best_of: Optional[int] | NotGiven = NOT_GIVEN,
@@ -440,12 +396,11 @@ def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -519,21 +474,7 @@ def create(
def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
echo: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -599,21 +540,7 @@ def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
async def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
echo: Optional[bool] | NotGiven = NOT_GIVEN,
@@ -676,12 +603,11 @@ async def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -762,21 +688,7 @@ async def create(
async def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
stream: Literal[True],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
@@ -846,12 +758,11 @@ async def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -925,21 +836,7 @@ async def create(
async def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
stream: bool,
best_of: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1009,12 +906,11 @@ async def create(
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
@@ -1088,21 +984,7 @@ async def create(
async def create(
self,
*,
- model: Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ],
+ model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
prompt: Union[str, List[str], List[int], List[List[int]], None],
best_of: Optional[int] | NotGiven = NOT_GIVEN,
echo: Optional[bool] | NotGiven = NOT_GIVEN,
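
completions.py narrows the `model` Literal to the three models still served by `/v1/completions` and trims the GPT-2/GPT-3 tokenizer aside from the `logit_bias` docs. A sketch of a call that type-checks against the new signature, reusing the docstring's own `{"50256": -100}` example to ban `<|endoftext|>` (the prompt is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # one of the three remaining Literal values
    prompt="Say this is a test",
    logit_bias={"50256": -100},      # suppress the <|endoftext|> token
)
```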
diff --git a/src/openai/resources/edits.py b/src/openai/resources/edits.py
deleted file mode 100644
index ac15494263..0000000000
--- a/src/openai/resources/edits.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-import typing_extensions
-from typing import Union, Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import Edit, edit_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import to_raw_response_wrapper, async_to_raw_response_wrapper
-from .._base_client import (
- make_request_options,
-)
-
-__all__ = ["Edits", "AsyncEdits"]
-
-
-class Edits(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> EditsWithRawResponse:
- return EditsWithRawResponse(self)
-
- @typing_extensions.deprecated(
- "The Edits API is deprecated; please use Chat Completions instead.\n\nhttps://openai.com/blog/gpt-4-api-general-availability#deprecation-of-the-edits-api\n"
- )
- def create(
- self,
- *,
- instruction: str,
- model: Union[str, Literal["text-davinci-edit-001", "code-davinci-edit-001"]],
- input: Optional[str] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Edit:
- """
- Creates a new edit for the provided input, instruction, and parameters.
-
- Args:
- instruction: The instruction that tells the model how to edit the prompt.
-
- model: ID of the model to use. You can use the `text-davinci-edit-001` or
- `code-davinci-edit-001` model with this endpoint.
-
- input: The input text to use as a starting point for the edit.
-
- n: How many edits to generate for the input and instruction.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/edits",
- body=maybe_transform(
- {
- "instruction": instruction,
- "model": model,
- "input": input,
- "n": n,
- "temperature": temperature,
- "top_p": top_p,
- },
- edit_create_params.EditCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Edit,
- )
-
-
-class AsyncEdits(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncEditsWithRawResponse:
- return AsyncEditsWithRawResponse(self)
-
- @typing_extensions.deprecated(
- "The Edits API is deprecated; please use Chat Completions instead.\n\nhttps://openai.com/blog/gpt-4-api-general-availability#deprecation-of-the-edits-api\n"
- )
- async def create(
- self,
- *,
- instruction: str,
- model: Union[str, Literal["text-davinci-edit-001", "code-davinci-edit-001"]],
- input: Optional[str] | NotGiven = NOT_GIVEN,
- n: Optional[int] | NotGiven = NOT_GIVEN,
- temperature: Optional[float] | NotGiven = NOT_GIVEN,
- top_p: Optional[float] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Edit:
- """
- Creates a new edit for the provided input, instruction, and parameters.
-
- Args:
- instruction: The instruction that tells the model how to edit the prompt.
-
- model: ID of the model to use. You can use the `text-davinci-edit-001` or
- `code-davinci-edit-001` model with this endpoint.
-
- input: The input text to use as a starting point for the edit.
-
- n: How many edits to generate for the input and instruction.
-
- temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
- make the output more random, while lower values like 0.2 will make it more
- focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
-
- top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/edits",
- body=maybe_transform(
- {
- "instruction": instruction,
- "model": model,
- "input": input,
- "n": n,
- "temperature": temperature,
- "top_p": top_p,
- },
- edit_create_params.EditCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Edit,
- )
-
-
-class EditsWithRawResponse:
- def __init__(self, edits: Edits) -> None:
- self.create = to_raw_response_wrapper( # pyright: ignore[reportDeprecated]
- edits.create # pyright: ignore[reportDeprecated],
- )
-
-
-class AsyncEditsWithRawResponse:
- def __init__(self, edits: AsyncEdits) -> None:
- self.create = async_to_raw_response_wrapper( # pyright: ignore[reportDeprecated]
- edits.create # pyright: ignore[reportDeprecated],
- )
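
edits.py is deleted outright; its own deprecation notice pointed at Chat Completions as the replacement. One way to approximate the old `instruction`/`input` pair there, offered as a hedged sketch rather than an official mapping:

```python
from openai import OpenAI

client = OpenAI()

# Rough stand-in for client.edits.create(instruction=..., input=..., model=...):
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "Fix the spelling mistakes."},  # was `instruction`
        {"role": "user", "content": "What day of the wek is it?"},    # was `input`
    ],
)
```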
diff --git a/src/openai/resources/fine_tunes.py b/src/openai/resources/fine_tunes.py
deleted file mode 100644
index 411952387c..0000000000
--- a/src/openai/resources/fine_tunes.py
+++ /dev/null
@@ -1,819 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional, overload
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import (
- FineTune,
- FineTuneEvent,
- FineTuneEventsListResponse,
- fine_tune_create_params,
- fine_tune_list_events_params,
-)
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import to_raw_response_wrapper, async_to_raw_response_wrapper
-from .._streaming import Stream, AsyncStream
-from ..pagination import SyncPage, AsyncPage
-from .._base_client import (
- AsyncPaginator,
- make_request_options,
-)
-
-__all__ = ["FineTunes", "AsyncFineTunes"]
-
-
-class FineTunes(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> FineTunesWithRawResponse:
- return FineTunesWithRawResponse(self)
-
- def create(
- self,
- *,
- training_file: str,
- batch_size: Optional[int] | NotGiven = NOT_GIVEN,
- classification_betas: Optional[List[float]] | NotGiven = NOT_GIVEN,
- classification_n_classes: Optional[int] | NotGiven = NOT_GIVEN,
- classification_positive_class: Optional[str] | NotGiven = NOT_GIVEN,
- compute_classification_metrics: Optional[bool] | NotGiven = NOT_GIVEN,
- hyperparameters: fine_tune_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- learning_rate_multiplier: Optional[float] | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["ada", "babbage", "curie", "davinci"], None] | NotGiven = NOT_GIVEN,
- prompt_loss_weight: Optional[float] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Creates a job that fine-tunes a specified model from a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
-
- Args:
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
- for how to upload a file.
-
- Your dataset must be formatted as a JSONL file, where each training example is a
- JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
-
- batch_size: The batch size to use for training. The batch size is the number of training
- examples used to train a single forward and backward pass.
-
- By default, the batch size will be dynamically configured to be ~0.2% of the
- number of examples in the training set, capped at 256 - in general, we've found
- that larger batch sizes tend to work better for larger datasets.
-
- classification_betas: If this is provided, we calculate F-beta scores at the specified beta values.
- The F-beta score is a generalization of F-1 score. This is only used for binary
- classification.
-
- With a beta of 1 (i.e. the F-1 score), precision and recall are given the same
- weight. A larger beta score puts more weight on recall and less on precision. A
- smaller beta score puts more weight on precision and less on recall.
-
- classification_n_classes: The number of classes in a classification task.
-
- This parameter is required for multiclass classification.
-
- classification_positive_class: The positive class in binary classification.
-
- This parameter is needed to generate precision, recall, and F1 metrics when
- doing binary classification.
-
- compute_classification_metrics: If set, we calculate classification-specific metrics such as accuracy and F-1
- score using the validation set at the end of every epoch. These metrics can be
- viewed in the
- [results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
-
- In order to compute classification metrics, you must provide a
- `validation_file`. Additionally, you must specify `classification_n_classes` for
- multiclass classification or `classification_positive_class` for binary
- classification.
-
- hyperparameters: The hyperparameters used for the fine-tuning job.
-
- learning_rate_multiplier: The learning rate multiplier to use for training. The fine-tuning learning rate
- is the original learning rate used for pretraining multiplied by this value.
-
- By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on
- final `batch_size` (larger learning rates tend to perform better with larger
- batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to
- see what produces the best results.
-
- model: The name of the base model to fine-tune. You can select one of "ada", "babbage",
- "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before
- 2023-08-22. To learn more about these models, see the
- [Models](https://platform.openai.com/docs/models) documentation.
-
- prompt_loss_weight: The weight to use for loss on the prompt tokens. This controls how much the
- model tries to learn to generate the prompt (as compared to the completion which
- always has a weight of 1.0), and can add a stabilizing effect to training when
- completions are short.
-
- If prompts are extremely long (relative to completions), it may make sense to
- reduce this weight so as to avoid over-prioritizing learning the prompt.
-
- suffix: A string of up to 40 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the
- [fine-tuning results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
- Your train and validation data should be mutually exclusive.
-
- Your dataset must be formatted as a JSONL file, where each validation example is
- a JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/fine-tunes",
- body=maybe_transform(
- {
- "training_file": training_file,
- "batch_size": batch_size,
- "classification_betas": classification_betas,
- "classification_n_classes": classification_n_classes,
- "classification_positive_class": classification_positive_class,
- "compute_classification_metrics": compute_classification_metrics,
- "hyperparameters": hyperparameters,
- "learning_rate_multiplier": learning_rate_multiplier,
- "model": model,
- "prompt_loss_weight": prompt_loss_weight,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- fine_tune_create_params.FineTuneCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- def retrieve(
- self,
- fine_tune_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Gets info about the fine-tune job.
-
- [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- f"/fine-tunes/{fine_tune_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> SyncPage[FineTune]:
- """List your organization's fine-tuning jobs"""
- return self._get_api_list(
- "/fine-tunes",
- page=SyncPage[FineTune],
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- model=FineTune,
- )
-
- def cancel(
- self,
- fine_tune_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- f"/fine-tunes/{fine_tune_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- @overload
- def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[False] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[True],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> Stream[FineTuneEvent]:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: bool,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse | Stream[FineTuneEvent]:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse | Stream[FineTuneEvent]:
- return self._get(
- f"/fine-tunes/{fine_tune_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"stream": stream}, fine_tune_list_events_params.FineTuneListEventsParams),
- ),
- cast_to=FineTuneEventsListResponse,
- stream=stream or False,
- stream_cls=Stream[FineTuneEvent],
- )
-
-
-class AsyncFineTunes(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncFineTunesWithRawResponse:
- return AsyncFineTunesWithRawResponse(self)
-
- async def create(
- self,
- *,
- training_file: str,
- batch_size: Optional[int] | NotGiven = NOT_GIVEN,
- classification_betas: Optional[List[float]] | NotGiven = NOT_GIVEN,
- classification_n_classes: Optional[int] | NotGiven = NOT_GIVEN,
- classification_positive_class: Optional[str] | NotGiven = NOT_GIVEN,
- compute_classification_metrics: Optional[bool] | NotGiven = NOT_GIVEN,
- hyperparameters: fine_tune_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
- learning_rate_multiplier: Optional[float] | NotGiven = NOT_GIVEN,
- model: Union[str, Literal["ada", "babbage", "curie", "davinci"], None] | NotGiven = NOT_GIVEN,
- prompt_loss_weight: Optional[float] | NotGiven = NOT_GIVEN,
- suffix: Optional[str] | NotGiven = NOT_GIVEN,
- validation_file: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Creates a job that fine-tunes a specified model from a given dataset.
-
- Response includes details of the enqueued job including job status and the name
- of the fine-tuned models once complete.
-
- [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
-
- Args:
- training_file: The ID of an uploaded file that contains training data.
-
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
- for how to upload a file.
-
- Your dataset must be formatted as a JSONL file, where each training example is a
- JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
-
- batch_size: The batch size to use for training. The batch size is the number of training
- examples used to train a single forward and backward pass.
-
- By default, the batch size will be dynamically configured to be ~0.2% of the
- number of examples in the training set, capped at 256 - in general, we've found
- that larger batch sizes tend to work better for larger datasets.
-
- classification_betas: If this is provided, we calculate F-beta scores at the specified beta values.
- The F-beta score is a generalization of F-1 score. This is only used for binary
- classification.
-
- With a beta of 1 (i.e. the F-1 score), precision and recall are given the same
- weight. A larger beta score puts more weight on recall and less on precision. A
- smaller beta score puts more weight on precision and less on recall.
-
- classification_n_classes: The number of classes in a classification task.
-
- This parameter is required for multiclass classification.
-
- classification_positive_class: The positive class in binary classification.
-
- This parameter is needed to generate precision, recall, and F1 metrics when
- doing binary classification.
-
- compute_classification_metrics: If set, we calculate classification-specific metrics such as accuracy and F-1
- score using the validation set at the end of every epoch. These metrics can be
- viewed in the
- [results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
-
- In order to compute classification metrics, you must provide a
- `validation_file`. Additionally, you must specify `classification_n_classes` for
- multiclass classification or `classification_positive_class` for binary
- classification.
-
- hyperparameters: The hyperparameters used for the fine-tuning job.
-
- learning_rate_multiplier: The learning rate multiplier to use for training. The fine-tuning learning rate
- is the original learning rate used for pretraining multiplied by this value.
-
- By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on
- final `batch_size` (larger learning rates tend to perform better with larger
- batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to
- see what produces the best results.
-
- model: The name of the base model to fine-tune. You can select one of "ada", "babbage",
- "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before
- 2023-08-22. To learn more about these models, see the
- [Models](https://platform.openai.com/docs/models) documentation.
-
- prompt_loss_weight: The weight to use for loss on the prompt tokens. This controls how much the
- model tries to learn to generate the prompt (as compared to the completion which
- always has a weight of 1.0), and can add a stabilizing effect to training when
- completions are short.
-
- If prompts are extremely long (relative to completions), it may make sense to
- reduce this weight so as to avoid over-prioritizing learning the prompt.
-
- suffix: A string of up to 40 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
-
- validation_file: The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the
- [fine-tuning results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
- Your train and validation data should be mutually exclusive.
-
- Your dataset must be formatted as a JSONL file, where each validation example is
- a JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/fine-tunes",
- body=maybe_transform(
- {
- "training_file": training_file,
- "batch_size": batch_size,
- "classification_betas": classification_betas,
- "classification_n_classes": classification_n_classes,
- "classification_positive_class": classification_positive_class,
- "compute_classification_metrics": compute_classification_metrics,
- "hyperparameters": hyperparameters,
- "learning_rate_multiplier": learning_rate_multiplier,
- "model": model,
- "prompt_loss_weight": prompt_loss_weight,
- "suffix": suffix,
- "validation_file": validation_file,
- },
- fine_tune_create_params.FineTuneCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- async def retrieve(
- self,
- fine_tune_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Gets info about the fine-tune job.
-
- [Learn more about fine-tuning](https://platform.openai.com/docs/guides/legacy-fine-tuning)
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- f"/fine-tunes/{fine_tune_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncPaginator[FineTune, AsyncPage[FineTune]]:
- """List your organization's fine-tuning jobs"""
- return self._get_api_list(
- "/fine-tunes",
- page=AsyncPage[FineTune],
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- model=FineTune,
- )
-
- async def cancel(
- self,
- fine_tune_id: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> FineTune:
- """
- Immediately cancel a fine-tune job.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- f"/fine-tunes/{fine_tune_id}/cancel",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=FineTune,
- )
-
- @overload
- async def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[False] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- async def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[True],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> AsyncStream[FineTuneEvent]:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- async def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: bool,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse | AsyncStream[FineTuneEvent]:
- """
- Get fine-grained status updates for a fine-tune job.
-
- Args:
- stream: Whether to stream events for the fine-tune job. If set to true, events will be
- sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- async def list_events(
- self,
- fine_tune_id: str,
- *,
- stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 86400,
- ) -> FineTuneEventsListResponse | AsyncStream[FineTuneEvent]:
- return await self._get(
- f"/fine-tunes/{fine_tune_id}/events",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform({"stream": stream}, fine_tune_list_events_params.FineTuneListEventsParams),
- ),
- cast_to=FineTuneEventsListResponse,
- stream=stream or False,
- stream_cls=AsyncStream[FineTuneEvent],
- )
-
-
-class FineTunesWithRawResponse:
- def __init__(self, fine_tunes: FineTunes) -> None:
- self.create = to_raw_response_wrapper(
- fine_tunes.create,
- )
- self.retrieve = to_raw_response_wrapper(
- fine_tunes.retrieve,
- )
- self.list = to_raw_response_wrapper(
- fine_tunes.list,
- )
- self.cancel = to_raw_response_wrapper(
- fine_tunes.cancel,
- )
- self.list_events = to_raw_response_wrapper(
- fine_tunes.list_events,
- )
-
-
-class AsyncFineTunesWithRawResponse:
- def __init__(self, fine_tunes: AsyncFineTunes) -> None:
- self.create = async_to_raw_response_wrapper(
- fine_tunes.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- fine_tunes.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- fine_tunes.list,
- )
- self.cancel = async_to_raw_response_wrapper(
- fine_tunes.cancel,
- )
- self.list_events = async_to_raw_response_wrapper(
- fine_tunes.list_events,
- )
diff --git a/src/openai/resources/fine_tuning/jobs.py b/src/openai/resources/fine_tuning/jobs.py
index a8f24efce5..7537b48daa 100644
--- a/src/openai/resources/fine_tuning/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs.py
@@ -49,7 +49,8 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
- Creates a job that fine-tunes a specified model from a given dataset.
+ Creates a fine-tuning job which begins the process of creating a new model from
+ a given dataset.
Response includes details of the enqueued job including job status and the name
of the fine-tuned models once complete.
@@ -299,7 +300,8 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> FineTuningJob:
"""
- Creates a job that fine-tunes a specified model from a given dataset.
+ Creates a fine-tuning job which begins the process of creating a new model from
+ a given dataset.
Response includes details of the enqueued job including job status and the name
of the fine-tuned models once complete.
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index df2b580587..d6108e1eed 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -2,33 +2,26 @@
from __future__ import annotations
-from .edit import Edit as Edit
from .image import Image as Image
from .model import Model as Model
from .shared import FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters
from .embedding import Embedding as Embedding
-from .fine_tune import FineTune as FineTune
from .completion import Completion as Completion
from .moderation import Moderation as Moderation
from .file_object import FileObject as FileObject
from .file_content import FileContent as FileContent
from .file_deleted import FileDeleted as FileDeleted
from .model_deleted import ModelDeleted as ModelDeleted
-from .fine_tune_event import FineTuneEvent as FineTuneEvent
from .images_response import ImagesResponse as ImagesResponse
from .completion_usage import CompletionUsage as CompletionUsage
from .file_list_params import FileListParams as FileListParams
from .completion_choice import CompletionChoice as CompletionChoice
from .image_edit_params import ImageEditParams as ImageEditParams
-from .edit_create_params import EditCreateParams as EditCreateParams
from .file_create_params import FileCreateParams as FileCreateParams
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .fine_tune_create_params import FineTuneCreateParams as FineTuneCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
-from .fine_tune_list_events_params import FineTuneListEventsParams as FineTuneListEventsParams
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
-from .fine_tune_events_list_response import FineTuneEventsListResponse as FineTuneEventsListResponse
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 49807a372e..6b38a89263 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -174,7 +174,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
will not call a function and instead generates a message. `auto` means the model
can pick between generating a message or calling a function. Specifying a
particular function via
- `{"type: "function", "function": {"name": "my_function"}}` forces the model to
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that function.
`none` is the default when no functions are present. `auto` is the default if
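The corrected `{"type": "function", ...}` syntax above is the forcing form of `tool_choice`. A short usage sketch, with a hypothetical `get_weather` tool definition that is not part of this diff:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    # Forces the model to call get_weather rather than reply with text.
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
print(completion.choices[0].message.tool_calls)
```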
diff --git a/src/openai/types/completion_create_params.py b/src/openai/types/completion_create_params.py
index ab6609a06b..e14c2860df 100644
--- a/src/openai/types/completion_create_params.py
+++ b/src/openai/types/completion_create_params.py
@@ -9,23 +9,7 @@
class CompletionCreateParamsBase(TypedDict, total=False):
- model: Required[
- Union[
- str,
- Literal[
- "babbage-002",
- "davinci-002",
- "gpt-3.5-turbo-instruct",
- "text-davinci-003",
- "text-davinci-002",
- "text-davinci-001",
- "code-davinci-002",
- "text-curie-001",
- "text-babbage-001",
- "text-ada-001",
- ],
- ]
- ]
+ model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
"""ID of the model to use.
You can use the
@@ -75,12 +59,11 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
- [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to
- convert text to token IDs. Mathematically, the bias is added to the logits
- generated by the model prior to sampling. The exact effect will vary per model,
- but values between -1 and 1 should decrease or increase likelihood of selection;
- values like -100 or 100 should result in a ban or exclusive selection of the
- relevant token.
+ [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
+ Mathematically, the bias is added to the logits generated by the model prior to
+ sampling. The exact effect will vary per model, but values between -1 and 1
+ should decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
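A quick sketch of the `logit_bias` behavior described above, reusing the docstring's own example of banning the `<|endoftext|>` token (ID 50256 in the GPT tokenizer); the prompt text is illustrative:

```python
from openai import OpenAI

client = OpenAI()

completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Write one sentence about the ocean.",
    max_tokens=32,
    # -100 effectively bans the token; +100 would force exclusive selection.
    logit_bias={"50256": -100},
)
print(completion.choices[0].text)
```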
diff --git a/src/openai/types/edit.py b/src/openai/types/edit.py
deleted file mode 100644
index 48bca2987b..0000000000
--- a/src/openai/types/edit.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from typing import List
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .completion_usage import CompletionUsage
-
-__all__ = ["Edit", "Choice"]
-
-
-class Choice(BaseModel):
- finish_reason: Literal["stop", "length"]
- """The reason the model stopped generating tokens.
-
- This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, or `length` if the maximum number of tokens specified in the request
- was reached.
- """
-
- index: int
- """The index of the choice in the list of choices."""
-
- text: str
- """The edited result."""
-
-
-class Edit(BaseModel):
- choices: List[Choice]
- """A list of edit choices. Can be more than one if `n` is greater than 1."""
-
- created: int
- """The Unix timestamp (in seconds) of when the edit was created."""
-
- object: Literal["edit"]
- """The object type, which is always `edit`."""
-
- usage: CompletionUsage
- """Usage statistics for the completion request."""
diff --git a/src/openai/types/edit_create_params.py b/src/openai/types/edit_create_params.py
deleted file mode 100644
index a23b79c369..0000000000
--- a/src/openai/types/edit_create_params.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing import Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["EditCreateParams"]
-
-
-class EditCreateParams(TypedDict, total=False):
- instruction: Required[str]
- """The instruction that tells the model how to edit the prompt."""
-
- model: Required[Union[str, Literal["text-davinci-edit-001", "code-davinci-edit-001"]]]
- """ID of the model to use.
-
- You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with
- this endpoint.
- """
-
- input: Optional[str]
- """The input text to use as a starting point for the edit."""
-
- n: Optional[int]
- """How many edits to generate for the input and instruction."""
-
- temperature: Optional[float]
- """What sampling temperature to use, between 0 and 2.
-
- Higher values like 0.8 will make the output more random, while lower values like
- 0.2 will make it more focused and deterministic.
-
- We generally recommend altering this or `top_p` but not both.
- """
-
- top_p: Optional[float]
- """
- An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass. So 0.1
- means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or `temperature` but not both.
- """
diff --git a/src/openai/types/fine_tune.py b/src/openai/types/fine_tune.py
deleted file mode 100644
index d1a063a065..0000000000
--- a/src/openai/types/fine_tune.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .file_object import FileObject
-from .fine_tune_event import FineTuneEvent
-
-__all__ = ["FineTune", "Hyperparams"]
-
-
-class Hyperparams(BaseModel):
- batch_size: int
- """The batch size to use for training.
-
- The batch size is the number of training examples used in a single forward and
- backward pass.
- """
-
- learning_rate_multiplier: float
- """The learning rate multiplier to use for training."""
-
- n_epochs: int
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
-
- prompt_loss_weight: float
- """The weight to use for loss on the prompt tokens."""
-
- classification_n_classes: Optional[int] = None
- """The number of classes to use for computing classification metrics."""
-
- classification_positive_class: Optional[str] = None
- """The positive class to use for computing classification metrics."""
-
- compute_classification_metrics: Optional[bool] = None
- """
- Whether classification metrics are computed using the validation dataset at the
- end of every epoch.
- """
-
-
-class FineTune(BaseModel):
- id: str
- """The object identifier, which can be referenced in the API endpoints."""
-
- created_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was created."""
-
- fine_tuned_model: Optional[str] = None
- """The name of the fine-tuned model that is being created."""
-
- hyperparams: Hyperparams
- """The hyperparameters used for the fine-tuning job.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/hyperparameters)
- for more details.
- """
-
- model: str
- """The base model that is being fine-tuned."""
-
- object: Literal["fine-tune"]
- """The object type, which is always "fine-tune"."""
-
- organization_id: str
- """The organization that owns the fine-tuning job."""
-
- result_files: List[FileObject]
- """The compiled results files for the fine-tuning job."""
-
- status: str
- """
- The current status of the fine-tuning job, which can be one of `created`,
- `running`, `succeeded`, `failed`, or `cancelled`.
- """
-
- training_files: List[FileObject]
- """The list of files used for training."""
-
- updated_at: int
- """The Unix timestamp (in seconds) for when the fine-tuning job was last updated."""
-
- validation_files: List[FileObject]
- """The list of files used for validation."""
-
- events: Optional[List[FineTuneEvent]] = None
- """
- The list of events that have been observed in the lifecycle of the FineTune job.
- """
diff --git a/src/openai/types/fine_tune_create_params.py b/src/openai/types/fine_tune_create_params.py
deleted file mode 100644
index 1be9c9ea04..0000000000
--- a/src/openai/types/fine_tune_create_params.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing import List, Union, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FineTuneCreateParams", "Hyperparameters"]
-
-
-class FineTuneCreateParams(TypedDict, total=False):
- training_file: Required[str]
- """The ID of an uploaded file that contains training data.
-
- See [upload file](https://platform.openai.com/docs/api-reference/files/upload)
- for how to upload a file.
-
- Your dataset must be formatted as a JSONL file, where each training example is a
- JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
- """
-
- batch_size: Optional[int]
- """The batch size to use for training.
-
- The batch size is the number of training examples used in a single forward and
- backward pass.
-
- By default, the batch size will be dynamically configured to be ~0.2% of the
- number of examples in the training set, capped at 256 - in general, we've found
- that larger batch sizes tend to work better for larger datasets.
- """
-
- classification_betas: Optional[List[float]]
- """If this is provided, we calculate F-beta scores at the specified beta values.
-
- The F-beta score is a generalization of the F-1 score. This is only used for
- binary classification.
-
- With a beta of 1 (i.e. the F-1 score), precision and recall are given the same
- weight. A larger beta score puts more weight on recall and less on precision. A
- smaller beta score puts more weight on precision and less on recall.
- """
-
- classification_n_classes: Optional[int]
- """The number of classes in a classification task.
-
- This parameter is required for multiclass classification.
- """
-
- classification_positive_class: Optional[str]
- """The positive class in binary classification.
-
- This parameter is needed to generate precision, recall, and F1 metrics when
- doing binary classification.
- """
-
- compute_classification_metrics: Optional[bool]
- """
- If set, we calculate classification-specific metrics such as accuracy and F-1
- score using the validation set at the end of every epoch. These metrics can be
- viewed in the
- [results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
-
- In order to compute classification metrics, you must provide a
- `validation_file`. Additionally, you must specify `classification_n_classes` for
- multiclass classification or `classification_positive_class` for binary
- classification.
- """
-
- hyperparameters: Hyperparameters
- """The hyperparameters used for the fine-tuning job."""
-
- learning_rate_multiplier: Optional[float]
- """
- The learning rate multiplier to use for training. The fine-tuning learning rate
- is the original learning rate used for pretraining multiplied by this value.
-
- By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on the
- final `batch_size` (larger learning rates tend to perform better with larger
- batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to
- see what produces the best results.
- """
-
- model: Union[str, Literal["ada", "babbage", "curie", "davinci"], None]
- """The name of the base model to fine-tune.
-
- You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned
- model created after 2022-04-21 and before 2023-08-22. To learn more about these
- models, see the [Models](https://platform.openai.com/docs/models) documentation.
- """
-
- prompt_loss_weight: Optional[float]
- """The weight to use for loss on the prompt tokens.
-
- This controls how much the model tries to learn to generate the prompt (as
- compared to the completion which always has a weight of 1.0), and can add a
- stabilizing effect to training when completions are short.
-
- If prompts are extremely long (relative to completions), it may make sense to
- reduce this weight so as to avoid over-prioritizing learning the prompt.
- """
-
- suffix: Optional[str]
- """
- A string of up to 40 characters that will be added to your fine-tuned model
- name.
-
- For example, a `suffix` of "custom-model-name" would produce a model name like
- `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
- """
-
- validation_file: Optional[str]
- """The ID of an uploaded file that contains validation data.
-
- If you provide this file, the data is used to generate validation metrics
- periodically during fine-tuning. These metrics can be viewed in the
- [fine-tuning results file](https://platform.openai.com/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
- Your train and validation data should be mutually exclusive.
-
- Your dataset must be formatted as a JSONL file, where each validation example is
- a JSON object with the keys "prompt" and "completion". Additionally, you must
- upload your file with the purpose `fine-tune`.
-
- See the
- [fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning/creating-training-data)
- for more details.
- """
-
-
-class Hyperparameters(TypedDict, total=False):
- n_epochs: Union[Literal["auto"], int]
- """The number of epochs to train the model for.
-
- An epoch refers to one full cycle through the training dataset.
- """
diff --git a/src/openai/types/fine_tune_event.py b/src/openai/types/fine_tune_event.py
deleted file mode 100644
index 299f0de24b..0000000000
--- a/src/openai/types/fine_tune_event.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["FineTuneEvent"]
-
-
-class FineTuneEvent(BaseModel):
- created_at: int
-
- level: str
-
- message: str
-
- object: Literal["fine-tune-event"]
diff --git a/src/openai/types/fine_tune_events_list_response.py b/src/openai/types/fine_tune_events_list_response.py
deleted file mode 100644
index c69746104d..0000000000
--- a/src/openai/types/fine_tune_events_list_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from typing import List
-from typing_extensions import Literal
-
-from .._models import BaseModel
-from .fine_tune_event import FineTuneEvent
-
-__all__ = ["FineTuneEventsListResponse"]
-
-
-class FineTuneEventsListResponse(BaseModel):
- data: List[FineTuneEvent]
-
- object: Literal["list"]
diff --git a/src/openai/types/fine_tune_list_events_params.py b/src/openai/types/fine_tune_list_events_params.py
deleted file mode 100644
index 1f23b108e6..0000000000
--- a/src/openai/types/fine_tune_list_events_params.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from typing import Union
-from typing_extensions import Literal, Required, TypedDict
-
-__all__ = ["FineTuneListEventsParamsBase", "FineTuneListEventsParamsNonStreaming", "FineTuneListEventsParamsStreaming"]
-
-
-class FineTuneListEventsParamsBase(TypedDict, total=False):
- pass
-
-
-class FineTuneListEventsParamsNonStreaming(FineTuneListEventsParamsBase):
- stream: Literal[False]
- """Whether to stream events for the fine-tune job.
-
- If set to true, events will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
- """
-
-
-class FineTuneListEventsParamsStreaming(FineTuneListEventsParamsBase):
- stream: Required[Literal[True]]
- """Whether to stream events for the fine-tune job.
-
- If set to true, events will be sent as data-only
- [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
- as they become available. The stream will terminate with a `data: [DONE]`
- message when the job is finished (succeeded, cancelled, or failed).
-
- If set to false, only events generated so far will be returned.
- """
-
-
-FineTuneListEventsParams = Union[FineTuneListEventsParamsNonStreaming, FineTuneListEventsParamsStreaming]
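These deleted params backed the `stream` overloads removed earlier in this diff: `stream=False` (or omitted) returned a complete `FineTuneEventsListResponse`, while `stream=True` switched the return type to a stream of `FineTuneEvent` server-sent events ending with `data: [DONE]`. A sketch of how the removed synchronous surface was consumed, for migration reference only (the job ID comes from the deleted tests below):

```python
from openai import OpenAI

client = OpenAI()

# Non-streaming: one response containing the events generated so far.
events = client.fine_tunes.list_events("ft-AF1WoRqd3aJAHsqc9NY7iL8F")
for event in events.data:
    print(event.level, event.message)

# Streaming: events arrive as they happen until the job finishes.
for event in client.fine_tunes.list_events(
    "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
    stream=True,
):
    print(event.message)
```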
diff --git a/tests/api_resources/test_edits.py b/tests/api_resources/test_edits.py
deleted file mode 100644
index 76069d6b83..0000000000
--- a/tests/api_resources/test_edits.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-import os
-
-import pytest
-
-from openai import OpenAI, AsyncOpenAI
-from tests.utils import assert_matches_type
-from openai.types import Edit
-from openai._client import OpenAI, AsyncOpenAI
-
-# pyright: reportDeprecated=false
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-api_key = "My API Key"
-
-
-class TestEdits:
- strict_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- loose_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
- parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
-
- @parametrize
- def test_method_create(self, client: OpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- edit = client.edits.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- )
- assert_matches_type(Edit, edit, path=["response"])
-
- @parametrize
- def test_method_create_with_all_params(self, client: OpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- edit = client.edits.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- input="What day of the wek is it?",
- n=1,
- temperature=1,
- top_p=1,
- )
- assert_matches_type(Edit, edit, path=["response"])
-
- @parametrize
- def test_raw_response_create(self, client: OpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- response = client.edits.with_raw_response.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- edit = response.parse()
- assert_matches_type(Edit, edit, path=["response"])
-
-
-class TestAsyncEdits:
- strict_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- loose_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
- parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
-
- @parametrize
- async def test_method_create(self, client: AsyncOpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- edit = await client.edits.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- )
- assert_matches_type(Edit, edit, path=["response"])
-
- @parametrize
- async def test_method_create_with_all_params(self, client: AsyncOpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- edit = await client.edits.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- input="What day of the wek is it?",
- n=1,
- temperature=1,
- top_p=1,
- )
- assert_matches_type(Edit, edit, path=["response"])
-
- @parametrize
- async def test_raw_response_create(self, client: AsyncOpenAI) -> None:
- with pytest.warns(DeprecationWarning):
- response = await client.edits.with_raw_response.create(
- instruction="Fix the spelling mistakes.",
- model="text-davinci-edit-001",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- edit = response.parse()
- assert_matches_type(Edit, edit, path=["response"])
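With the Edits resource and its tests gone, the equivalent instruction-following call moves to chat completions. The mapping below is our suggestion rather than part of this diff, reusing the deleted tests' inputs (including the deliberately misspelled "wek"):

```python
from openai import OpenAI

client = OpenAI()

# Removed: client.edits.create(model="text-davinci-edit-001",
#                              instruction="Fix the spelling mistakes.",
#                              input="What day of the wek is it?")
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "Fix the spelling mistakes."},
        {"role": "user", "content": "What day of the wek is it?"},
    ],
)
print(completion.choices[0].message.content)
```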
diff --git a/tests/api_resources/test_fine_tunes.py b/tests/api_resources/test_fine_tunes.py
deleted file mode 100644
index edaf784848..0000000000
--- a/tests/api_resources/test_fine_tunes.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-import os
-
-import pytest
-
-from openai import OpenAI, AsyncOpenAI
-from tests.utils import assert_matches_type
-from openai.types import FineTune, FineTuneEventsListResponse
-from openai._client import OpenAI, AsyncOpenAI
-from openai.pagination import SyncPage, AsyncPage
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-api_key = "My API Key"
-
-
-class TestFineTunes:
- strict_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- loose_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
- parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
-
- @parametrize
- def test_method_create(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.create(
- training_file="file-abc123",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_method_create_with_all_params(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.create(
- training_file="file-abc123",
- batch_size=0,
- classification_betas=[0.6, 1, 1.5, 2],
- classification_n_classes=0,
- classification_positive_class="string",
- compute_classification_metrics=True,
- hyperparameters={"n_epochs": "auto"},
- learning_rate_multiplier=0,
- model="curie",
- prompt_loss_weight=0,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_raw_response_create(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.create(
- training_file="file-abc123",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_method_retrieve(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_method_list(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.list()
- assert_matches_type(SyncPage[FineTune], fine_tune, path=["response"])
-
- @parametrize
- def test_raw_response_list(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.list()
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(SyncPage[FineTune], fine_tune, path=["response"])
-
- @parametrize
- def test_method_cancel(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- def test_raw_response_cancel(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- def test_method_list_events_overload_1(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- def test_method_list_events_with_all_params_overload_1(self, client: OpenAI) -> None:
- fine_tune = client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=False,
- )
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- def test_raw_response_list_events_overload_1(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- def test_method_list_events_overload_2(self, client: OpenAI) -> None:
- client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=True,
- )
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- def test_raw_response_list_events_overload_2(self, client: OpenAI) -> None:
- response = client.fine_tunes.with_raw_response.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=True,
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- response.parse()
-
-
-class TestAsyncFineTunes:
- strict_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
- loose_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
- parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
-
- @parametrize
- async def test_method_create(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.create(
- training_file="file-abc123",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_method_create_with_all_params(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.create(
- training_file="file-abc123",
- batch_size=0,
- classification_betas=[0.6, 1, 1.5, 2],
- classification_n_classes=0,
- classification_positive_class="string",
- compute_classification_metrics=True,
- hyperparameters={"n_epochs": "auto"},
- learning_rate_multiplier=0,
- model="curie",
- prompt_loss_weight=0,
- suffix="x",
- validation_file="file-abc123",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_raw_response_create(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.create(
- training_file="file-abc123",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_method_retrieve(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_raw_response_retrieve(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.retrieve(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_method_list(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.list()
- assert_matches_type(AsyncPage[FineTune], fine_tune, path=["response"])
-
- @parametrize
- async def test_raw_response_list(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.list()
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(AsyncPage[FineTune], fine_tune, path=["response"])
-
- @parametrize
- async def test_method_cancel(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @parametrize
- async def test_raw_response_cancel(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.cancel(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTune, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- async def test_method_list_events_overload_1(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- async def test_method_list_events_with_all_params_overload_1(self, client: AsyncOpenAI) -> None:
- fine_tune = await client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=False,
- )
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- async def test_raw_response_list_events_overload_1(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- fine_tune = response.parse()
- assert_matches_type(FineTuneEventsListResponse, fine_tune, path=["response"])
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- async def test_method_list_events_overload_2(self, client: AsyncOpenAI) -> None:
- await client.fine_tunes.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=True,
- )
-
- @pytest.mark.skip(reason="Prism chokes on this")
- @parametrize
- async def test_raw_response_list_events_overload_2(self, client: AsyncOpenAI) -> None:
- response = await client.fine_tunes.with_raw_response.list_events(
- "ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- stream=True,
- )
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- response.parse()