From bcaed822cd49aa8d5f51fb73ef8a565baf706cec Mon Sep 17 00:00:00 2001 From: Ives van Hoorne Date: Thu, 13 Feb 2025 17:11:57 +0000 Subject: [PATCH] i give up, include openapi generated files --- .github/workflows/_integration_tests.yml | 4 - .github/workflows/_tests.yml | 4 - .gitignore | 3 - MANIFEST.in | 1 - pyproject.toml | 4 - src/together/generated/__init__.py | 224 +++ src/together/generated/api/__init__.py | 14 + src/together/generated/api/audio_api.py | 302 +++ src/together/generated/api/chat_api.py | 308 ++++ src/together/generated/api/completion_api.py | 308 ++++ src/together/generated/api/embeddings_api.py | 308 ++++ src/together/generated/api/endpoints_api.py | 1354 ++++++++++++++ src/together/generated/api/files_api.py | 996 ++++++++++ src/together/generated/api/fine_tuning_api.py | 1630 +++++++++++++++++ src/together/generated/api/hardware_api.py | 298 +++ src/together/generated/api/images_api.py | 291 +++ src/together/generated/api/models_api.py | 279 +++ src/together/generated/api/rerank_api.py | 308 ++++ src/together/generated/api_client.py | 758 ++++++++ src/together/generated/api_response.py | 20 + src/together/generated/configuration.py | 583 ++++++ src/together/generated/docs/AudioApi.md | 88 + .../generated/docs/AudioSpeechRequest.md | 34 + .../generated/docs/AudioSpeechRequestModel.md | 27 + .../generated/docs/AudioSpeechRequestVoice.md | 27 + .../generated/docs/AudioSpeechStreamChunk.md | 29 + .../generated/docs/AudioSpeechStreamEvent.md | 27 + .../docs/AudioSpeechStreamResponse.md | 27 + src/together/generated/docs/Autoscaling.md | 29 + src/together/generated/docs/ChatApi.md | 93 + .../ChatCompletionAssistantMessageParam.md | 31 + .../generated/docs/ChatCompletionChoice.md | 30 + .../docs/ChatCompletionChoiceDelta.md | 31 + .../ChatCompletionChoiceDeltaFunctionCall.md | 28 + .../docs/ChatCompletionChoicesDataInner.md | 32 + .../ChatCompletionChoicesDataInnerLogprobs.md | 29 + .../generated/docs/ChatCompletionChunk.md | 33 + 
.../docs/ChatCompletionChunkChoicesInner.md | 31 + .../generated/docs/ChatCompletionEvent.md | 27 + .../ChatCompletionFunctionMessageParam.md | 29 + .../generated/docs/ChatCompletionMessage.md | 30 + .../docs/ChatCompletionMessageFunctionCall.md | 28 + .../docs/ChatCompletionMessageParam.md | 32 + .../generated/docs/ChatCompletionRequest.md | 49 + .../docs/ChatCompletionRequestFunctionCall.md | 27 + .../ChatCompletionRequestFunctionCallOneOf.md | 27 + .../ChatCompletionRequestMessagesInner.md | 28 + .../docs/ChatCompletionRequestModel.md | 27 + .../ChatCompletionRequestResponseFormat.md | 29 + .../docs/ChatCompletionRequestToolChoice.md | 31 + .../generated/docs/ChatCompletionResponse.md | 32 + .../generated/docs/ChatCompletionStream.md | 27 + .../docs/ChatCompletionSystemMessageParam.md | 29 + .../generated/docs/ChatCompletionToken.md | 30 + .../generated/docs/ChatCompletionTool.md | 28 + .../docs/ChatCompletionToolFunction.md | 29 + .../docs/ChatCompletionToolMessageParam.md | 29 + .../docs/ChatCompletionUserMessageParam.md | 29 + src/together/generated/docs/CompletionApi.md | 93 + .../generated/docs/CompletionChoice.md | 27 + .../docs/CompletionChoicesDataInner.md | 30 + .../generated/docs/CompletionChunk.md | 32 + .../generated/docs/CompletionChunkUsage.md | 29 + .../generated/docs/CompletionEvent.md | 27 + .../generated/docs/CompletionRequest.md | 44 + .../generated/docs/CompletionRequestModel.md | 27 + .../docs/CompletionRequestSafetyModel.md | 27 + .../generated/docs/CompletionResponse.md | 33 + .../generated/docs/CompletionStream.md | 27 + .../generated/docs/CompletionToken.md | 30 + .../generated/docs/CreateEndpointRequest.md | 33 + .../generated/docs/DedicatedEndpoint.md | 38 + src/together/generated/docs/EmbeddingsApi.md | 93 + .../generated/docs/EmbeddingsRequest.md | 28 + .../generated/docs/EmbeddingsRequestInput.md | 26 + .../generated/docs/EmbeddingsRequestModel.md | 27 + .../generated/docs/EmbeddingsResponse.md | 29 + 
.../docs/EmbeddingsResponseDataInner.md | 29 + .../generated/docs/EndpointPricing.md | 28 + src/together/generated/docs/EndpointsApi.md | 416 +++++ src/together/generated/docs/ErrorData.md | 27 + src/together/generated/docs/ErrorDataError.md | 30 + .../generated/docs/FileDeleteResponse.md | 28 + src/together/generated/docs/FileList.md | 27 + src/together/generated/docs/FileObject.md | 30 + src/together/generated/docs/FileResponse.md | 35 + src/together/generated/docs/FilesApi.md | 320 ++++ src/together/generated/docs/FineTuneEvent.md | 40 + .../generated/docs/FineTunesPostRequest.md | 45 + .../docs/FineTunesPostRequestTrainOnInputs.md | 27 + .../docs/FineTunesPostRequestTrainingType.md | 31 + src/together/generated/docs/FineTuningApi.md | 488 +++++ .../generated/docs/FinetuneDownloadResult.md | 31 + .../generated/docs/FinetuneEventLevels.md | 18 + .../generated/docs/FinetuneEventType.md | 56 + .../generated/docs/FinetuneJobStatus.md | 24 + src/together/generated/docs/FinetuneList.md | 27 + .../generated/docs/FinetuneListEvents.md | 27 + .../generated/docs/FinetuneResponse.md | 58 + .../docs/FinetuneResponseTrainOnInputs.md | 26 + src/together/generated/docs/FinishReason.md | 16 + .../generated/docs/FullTrainingType.md | 27 + src/together/generated/docs/HardwareApi.md | 88 + .../generated/docs/HardwareAvailability.md | 28 + src/together/generated/docs/HardwareSpec.md | 31 + .../generated/docs/HardwareWithStatus.md | 33 + src/together/generated/docs/ImageResponse.md | 30 + .../generated/docs/ImageResponseDataInner.md | 29 + src/together/generated/docs/ImagesApi.md | 87 + .../docs/ImagesGenerationsPostRequest.md | 39 + ...esGenerationsPostRequestImageLorasInner.md | 28 + .../docs/ImagesGenerationsPostRequestModel.md | 27 + src/together/generated/docs/LRScheduler.md | 28 + .../generated/docs/LinearLRSchedulerArgs.md | 27 + src/together/generated/docs/ListEndpoint.md | 35 + .../docs/ListEndpoints200Response.md | 28 + .../generated/docs/ListHardware200Response.md | 28 + 
.../docs/ListHardware200ResponseOneOf.md | 29 + .../docs/ListHardware200ResponseOneOf1.md | 29 + .../ListHardware200ResponseOneOf1DataInner.md | 32 + .../ListHardware200ResponseOneOfDataInner.md | 32 + .../generated/docs/LoRATrainingType.md | 31 + src/together/generated/docs/LogprobsPart.md | 29 + src/together/generated/docs/ModelInfo.md | 36 + src/together/generated/docs/ModelsApi.md | 87 + src/together/generated/docs/Pricing.md | 31 + .../generated/docs/PromptPartInner.md | 28 + src/together/generated/docs/RerankApi.md | 93 + src/together/generated/docs/RerankRequest.md | 32 + .../generated/docs/RerankRequestDocuments.md | 27 + .../generated/docs/RerankRequestModel.md | 27 + src/together/generated/docs/RerankResponse.md | 31 + .../docs/RerankResponseResultsInner.md | 29 + .../RerankResponseResultsInnerDocument.md | 27 + src/together/generated/docs/StreamSentinel.md | 27 + src/together/generated/docs/ToolChoice.md | 30 + .../generated/docs/ToolChoiceFunction.md | 28 + src/together/generated/docs/ToolsPart.md | 28 + .../generated/docs/ToolsPartFunction.md | 29 + .../generated/docs/UpdateEndpointRequest.md | 29 + src/together/generated/docs/UsageData.md | 29 + src/together/generated/exceptions.py | 220 +++ src/together/generated/models/__init__.py | 197 ++ .../generated/models/audio_speech_request.py | 212 +++ .../models/audio_speech_request_model.py | 158 ++ .../models/audio_speech_request_voice.py | 158 ++ .../models/audio_speech_stream_chunk.py | 98 + .../models/audio_speech_stream_event.py | 95 + .../models/audio_speech_stream_response.py | 169 ++ src/together/generated/models/autoscaling.py | 93 + ...chat_completion_assistant_message_param.py | 130 ++ .../models/chat_completion_choice.py | 112 ++ .../models/chat_completion_choice_delta.py | 134 ++ ...t_completion_choice_delta_function_call.py | 86 + .../chat_completion_choices_data_inner.py | 123 ++ ..._completion_choices_data_inner_logprobs.py | 97 + .../generated/models/chat_completion_chunk.py | 139 ++ 
.../chat_completion_chunk_choices_inner.py | 112 ++ .../generated/models/chat_completion_event.py | 95 + .../chat_completion_function_message_param.py | 98 + .../models/chat_completion_message.py | 127 ++ .../chat_completion_message_function_call.py | 86 + .../models/chat_completion_message_param.py | 266 +++ .../models/chat_completion_request.py | 304 +++ .../chat_completion_request_function_call.py | 177 ++ ...completion_request_function_call_one_of.py | 83 + .../chat_completion_request_messages_inner.py | 99 + .../models/chat_completion_request_model.py | 158 ++ ...chat_completion_request_response_format.py | 90 + .../chat_completion_request_tool_choice.py | 166 ++ .../models/chat_completion_response.py | 136 ++ .../models/chat_completion_stream.py | 169 ++ .../chat_completion_system_message_param.py | 98 + .../generated/models/chat_completion_token.py | 100 + .../generated/models/chat_completion_tool.py | 106 ++ .../models/chat_completion_tool_function.py | 91 + .../chat_completion_tool_message_param.py | 98 + .../chat_completion_user_message_param.py | 98 + .../generated/models/completion_choice.py | 83 + .../models/completion_choices_data_inner.py | 101 + .../generated/models/completion_chunk.py | 139 ++ .../models/completion_chunk_usage.py | 95 + .../generated/models/completion_event.py | 95 + .../generated/models/completion_request.py | 212 +++ .../models/completion_request_model.py | 158 ++ .../models/completion_request_safety_model.py | 158 ++ .../generated/models/completion_response.py | 151 ++ .../generated/models/completion_stream.py | 169 ++ .../generated/models/completion_token.py | 100 + .../models/create_endpoint_request.py | 156 ++ .../generated/models/dedicated_endpoint.py | 157 ++ .../generated/models/embeddings_request.py | 105 ++ .../models/embeddings_request_input.py | 171 ++ .../models/embeddings_request_model.py | 158 ++ .../generated/models/embeddings_response.py | 115 ++ .../models/embeddings_response_data_inner.py | 105 ++ 
.../generated/models/endpoint_pricing.py | 85 + src/together/generated/models/error_data.py | 95 + .../generated/models/error_data_error.py | 93 + .../generated/models/file_delete_response.py | 84 + src/together/generated/models/file_list.py | 99 + src/together/generated/models/file_object.py | 93 + .../generated/models/file_response.py | 135 ++ .../generated/models/fine_tune_event.py | 137 ++ .../models/fine_tunes_post_request.py | 233 +++ ...fine_tunes_post_request_train_on_inputs.py | 170 ++ .../fine_tunes_post_request_training_type.py | 172 ++ .../models/finetune_download_result.py | 116 ++ .../generated/models/finetune_event_levels.py | 39 + .../generated/models/finetune_event_type.py | 58 + .../generated/models/finetune_job_status.py | 42 + .../generated/models/finetune_list.py | 99 + .../generated/models/finetune_list_events.py | 99 + .../generated/models/finetune_response.py | 222 +++ .../finetune_response_train_on_inputs.py | 170 ++ .../generated/models/finish_reason.py | 38 + .../generated/models/full_training_type.py | 90 + .../generated/models/hardware_availability.py | 94 + .../generated/models/hardware_spec.py | 100 + .../generated/models/hardware_with_status.py | 140 ++ .../generated/models/image_response.py | 112 ++ .../models/image_response_data_inner.py | 85 + .../models/images_generations_post_request.py | 217 +++ ...erations_post_request_image_loras_inner.py | 88 + .../images_generations_post_request_model.py | 158 ++ .../models/linear_lr_scheduler_args.py | 94 + .../generated/models/list_endpoint.py | 136 ++ .../models/list_endpoints200_response.py | 108 ++ .../models/list_hardware200_response.py | 185 ++ .../list_hardware200_response_one_of.py | 113 ++ .../list_hardware200_response_one_of1.py | 113 ++ ...hardware200_response_one_of1_data_inner.py | 140 ++ ..._hardware200_response_one_of_data_inner.py | 137 ++ .../generated/models/lo_ra_training_type.py | 123 ++ .../generated/models/logprobs_part.py | 97 + 
src/together/generated/models/lr_scheduler.py | 96 + src/together/generated/models/model_info.py | 135 ++ src/together/generated/models/pricing.py | 101 + .../generated/models/prompt_part_inner.py | 97 + .../generated/models/rerank_request.py | 144 ++ .../models/rerank_request_documents.py | 171 ++ .../generated/models/rerank_request_model.py | 158 ++ .../generated/models/rerank_response.py | 127 ++ .../models/rerank_response_results_inner.py | 101 + .../rerank_response_results_inner_document.py | 83 + .../generated/models/stream_sentinel.py | 90 + src/together/generated/models/tool_choice.py | 115 ++ .../generated/models/tool_choice_function.py | 86 + src/together/generated/models/tools_part.py | 97 + .../generated/models/tools_part_function.py | 93 + .../models/update_endpoint_request.py | 115 ++ src/together/generated/models/usage_data.py | 95 + src/together/generated/rest.py | 195 ++ src/together/generated/test/__init__.py | 0 src/together/generated/test/test_audio_api.py | 38 + .../test/test_audio_speech_request.py | 63 + .../test/test_audio_speech_request_model.py | 52 + .../test/test_audio_speech_request_voice.py | 52 + .../test/test_audio_speech_stream_chunk.py | 58 + .../test/test_audio_speech_stream_event.py | 60 + .../test/test_audio_speech_stream_response.py | 56 + .../generated/test/test_autoscaling.py | 56 + src/together/generated/test/test_chat_api.py | 38 + ...chat_completion_assistant_message_param.py | 70 + .../test/test_chat_completion_choice.py | 98 + .../test/test_chat_completion_choice_delta.py | 70 + ...t_completion_choice_delta_function_call.py | 58 + ...test_chat_completion_choices_data_inner.py | 74 + ..._completion_choices_data_inner_logprobs.py | 63 + .../test/test_chat_completion_chunk.py | 108 ++ ...est_chat_completion_chunk_choices_inner.py | 92 + .../test/test_chat_completion_event.py | 112 ++ ..._chat_completion_function_message_param.py | 60 + .../test/test_chat_completion_message.py | 68 + 
...t_chat_completion_message_function_call.py | 58 + .../test_chat_completion_message_param.py | 74 + .../test/test_chat_completion_request.py | 98 + ...t_chat_completion_request_function_call.py | 56 + ...completion_request_function_call_one_of.py | 56 + ..._chat_completion_request_messages_inner.py | 58 + .../test_chat_completion_request_model.py | 54 + ...chat_completion_request_response_format.py | 58 + ...est_chat_completion_request_tool_choice.py | 66 + .../test/test_chat_completion_response.py | 110 ++ .../test/test_chat_completion_stream.py | 54 + ...st_chat_completion_system_message_param.py | 59 + .../test/test_chat_completion_token.py | 60 + .../test/test_chat_completion_tool.py | 66 + .../test_chat_completion_tool_function.py | 60 + ...test_chat_completion_tool_message_param.py | 60 + ...test_chat_completion_user_message_param.py | 59 + .../generated/test/test_completion_api.py | 38 + .../generated/test/test_completion_choice.py | 53 + .../test_completion_choices_data_inner.py | 67 + .../generated/test/test_completion_chunk.py | 77 + .../test/test_completion_chunk_usage.py | 58 + .../generated/test/test_completion_event.py | 80 + .../generated/test/test_completion_request.py | 74 + .../test/test_completion_request_model.py | 52 + .../test_completion_request_safety_model.py | 54 + .../test/test_completion_response.py | 114 ++ .../generated/test/test_completion_stream.py | 54 + .../generated/test/test_completion_token.py | 60 + .../test/test_create_endpoint_request.py | 66 + .../generated/test/test_dedicated_endpoint.py | 78 + .../generated/test/test_embeddings_api.py | 38 + .../generated/test/test_embeddings_request.py | 56 + .../test/test_embeddings_request_input.py | 52 + .../test/test_embeddings_request_model.py | 52 + .../test/test_embeddings_response.py | 72 + .../test_embeddings_response_data_inner.py | 64 + .../generated/test/test_endpoint_pricing.py | 54 + .../generated/test/test_endpoints_api.py | 66 + .../generated/test/test_error_data.py | 62 
+ .../generated/test/test_error_data_error.py | 58 + .../test/test_file_delete_response.py | 54 + src/together/generated/test/test_file_list.py | 76 + .../generated/test/test_file_object.py | 56 + .../generated/test/test_file_response.py | 70 + src/together/generated/test/test_files_api.py | 59 + .../generated/test/test_fine_tune_event.py | 79 + .../test/test_fine_tunes_post_request.py | 76 + ...fine_tunes_post_request_train_on_inputs.py | 54 + ...t_fine_tunes_post_request_training_type.py | 62 + .../generated/test/test_fine_tuning_api.py | 73 + .../test/test_finetune_download_result.py | 57 + .../test/test_finetune_event_levels.py | 35 + .../test/test_finetune_event_type.py | 35 + .../test/test_finetune_job_status.py | 35 + .../generated/test/test_finetune_list.py | 54 + .../test/test_finetune_list_events.py | 54 + .../generated/test/test_finetune_response.py | 89 + .../test_finetune_response_train_on_inputs.py | 54 + .../generated/test/test_finish_reason.py | 35 + .../generated/test/test_full_training_type.py | 54 + .../generated/test/test_hardware_api.py | 38 + .../test/test_hardware_availability.py | 54 + .../generated/test/test_hardware_spec.py | 60 + .../test/test_hardware_with_status.py | 74 + .../generated/test/test_image_response.py | 70 + .../test/test_image_response_data_inner.py | 56 + .../generated/test/test_images_api.py | 38 + .../test_images_generations_post_request.py | 73 + ...erations_post_request_image_loras_inner.py | 60 + ...t_images_generations_post_request_model.py | 54 + .../test/test_linear_lr_scheduler_args.py | 53 + .../generated/test/test_list_endpoint.py | 68 + .../test/test_list_endpoints200_response.py | 78 + .../test/test_list_hardware200_response.py | 60 + .../test_list_hardware200_response_one_of.py | 62 + .../test_list_hardware200_response_one_of1.py | 62 + ...hardware200_response_one_of1_data_inner.py | 78 + ..._hardware200_response_one_of_data_inner.py | 75 + .../test/test_lo_ra_training_type.py | 60 + 
.../generated/test/test_logprobs_part.py | 61 + .../generated/test/test_lr_scheduler.py | 56 + .../generated/test/test_model_info.py | 71 + .../generated/test/test_models_api.py | 38 + src/together/generated/test/test_pricing.py | 62 + .../generated/test/test_prompt_part_inner.py | 63 + .../generated/test/test_rerank_api.py | 38 + .../generated/test/test_rerank_request.py | 61 + .../test/test_rerank_request_documents.py | 52 + .../test/test_rerank_request_model.py | 52 + .../generated/test/test_rerank_response.py | 60 + .../test_rerank_response_results_inner.py | 62 + ..._rerank_response_results_inner_document.py | 55 + .../generated/test/test_stream_sentinel.py | 54 + .../generated/test/test_tool_choice.py | 64 + .../test/test_tool_choice_function.py | 56 + .../generated/test/test_tools_part.py | 57 + .../test/test_tools_part_function.py | 55 + .../test/test_update_endpoint_request.py | 57 + .../generated/test/test_usage_data.py | 58 + 374 files changed, 34742 insertions(+), 16 deletions(-) delete mode 100644 MANIFEST.in create mode 100644 src/together/generated/__init__.py create mode 100644 src/together/generated/api/__init__.py create mode 100644 src/together/generated/api/audio_api.py create mode 100644 src/together/generated/api/chat_api.py create mode 100644 src/together/generated/api/completion_api.py create mode 100644 src/together/generated/api/embeddings_api.py create mode 100644 src/together/generated/api/endpoints_api.py create mode 100644 src/together/generated/api/files_api.py create mode 100644 src/together/generated/api/fine_tuning_api.py create mode 100644 src/together/generated/api/hardware_api.py create mode 100644 src/together/generated/api/images_api.py create mode 100644 src/together/generated/api/models_api.py create mode 100644 src/together/generated/api/rerank_api.py create mode 100644 src/together/generated/api_client.py create mode 100644 src/together/generated/api_response.py create mode 100644 src/together/generated/configuration.py 
create mode 100644 src/together/generated/docs/AudioApi.md create mode 100644 src/together/generated/docs/AudioSpeechRequest.md create mode 100644 src/together/generated/docs/AudioSpeechRequestModel.md create mode 100644 src/together/generated/docs/AudioSpeechRequestVoice.md create mode 100644 src/together/generated/docs/AudioSpeechStreamChunk.md create mode 100644 src/together/generated/docs/AudioSpeechStreamEvent.md create mode 100644 src/together/generated/docs/AudioSpeechStreamResponse.md create mode 100644 src/together/generated/docs/Autoscaling.md create mode 100644 src/together/generated/docs/ChatApi.md create mode 100644 src/together/generated/docs/ChatCompletionAssistantMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionChoice.md create mode 100644 src/together/generated/docs/ChatCompletionChoiceDelta.md create mode 100644 src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInner.md create mode 100644 src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md create mode 100644 src/together/generated/docs/ChatCompletionChunk.md create mode 100644 src/together/generated/docs/ChatCompletionChunkChoicesInner.md create mode 100644 src/together/generated/docs/ChatCompletionEvent.md create mode 100644 src/together/generated/docs/ChatCompletionFunctionMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionMessage.md create mode 100644 src/together/generated/docs/ChatCompletionMessageFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionRequest.md create mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCall.md create mode 100644 src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md create mode 100644 src/together/generated/docs/ChatCompletionRequestMessagesInner.md create mode 100644 
src/together/generated/docs/ChatCompletionRequestModel.md create mode 100644 src/together/generated/docs/ChatCompletionRequestResponseFormat.md create mode 100644 src/together/generated/docs/ChatCompletionRequestToolChoice.md create mode 100644 src/together/generated/docs/ChatCompletionResponse.md create mode 100644 src/together/generated/docs/ChatCompletionStream.md create mode 100644 src/together/generated/docs/ChatCompletionSystemMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionToken.md create mode 100644 src/together/generated/docs/ChatCompletionTool.md create mode 100644 src/together/generated/docs/ChatCompletionToolFunction.md create mode 100644 src/together/generated/docs/ChatCompletionToolMessageParam.md create mode 100644 src/together/generated/docs/ChatCompletionUserMessageParam.md create mode 100644 src/together/generated/docs/CompletionApi.md create mode 100644 src/together/generated/docs/CompletionChoice.md create mode 100644 src/together/generated/docs/CompletionChoicesDataInner.md create mode 100644 src/together/generated/docs/CompletionChunk.md create mode 100644 src/together/generated/docs/CompletionChunkUsage.md create mode 100644 src/together/generated/docs/CompletionEvent.md create mode 100644 src/together/generated/docs/CompletionRequest.md create mode 100644 src/together/generated/docs/CompletionRequestModel.md create mode 100644 src/together/generated/docs/CompletionRequestSafetyModel.md create mode 100644 src/together/generated/docs/CompletionResponse.md create mode 100644 src/together/generated/docs/CompletionStream.md create mode 100644 src/together/generated/docs/CompletionToken.md create mode 100644 src/together/generated/docs/CreateEndpointRequest.md create mode 100644 src/together/generated/docs/DedicatedEndpoint.md create mode 100644 src/together/generated/docs/EmbeddingsApi.md create mode 100644 src/together/generated/docs/EmbeddingsRequest.md create mode 100644 
src/together/generated/docs/EmbeddingsRequestInput.md create mode 100644 src/together/generated/docs/EmbeddingsRequestModel.md create mode 100644 src/together/generated/docs/EmbeddingsResponse.md create mode 100644 src/together/generated/docs/EmbeddingsResponseDataInner.md create mode 100644 src/together/generated/docs/EndpointPricing.md create mode 100644 src/together/generated/docs/EndpointsApi.md create mode 100644 src/together/generated/docs/ErrorData.md create mode 100644 src/together/generated/docs/ErrorDataError.md create mode 100644 src/together/generated/docs/FileDeleteResponse.md create mode 100644 src/together/generated/docs/FileList.md create mode 100644 src/together/generated/docs/FileObject.md create mode 100644 src/together/generated/docs/FileResponse.md create mode 100644 src/together/generated/docs/FilesApi.md create mode 100644 src/together/generated/docs/FineTuneEvent.md create mode 100644 src/together/generated/docs/FineTunesPostRequest.md create mode 100644 src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md create mode 100644 src/together/generated/docs/FineTunesPostRequestTrainingType.md create mode 100644 src/together/generated/docs/FineTuningApi.md create mode 100644 src/together/generated/docs/FinetuneDownloadResult.md create mode 100644 src/together/generated/docs/FinetuneEventLevels.md create mode 100644 src/together/generated/docs/FinetuneEventType.md create mode 100644 src/together/generated/docs/FinetuneJobStatus.md create mode 100644 src/together/generated/docs/FinetuneList.md create mode 100644 src/together/generated/docs/FinetuneListEvents.md create mode 100644 src/together/generated/docs/FinetuneResponse.md create mode 100644 src/together/generated/docs/FinetuneResponseTrainOnInputs.md create mode 100644 src/together/generated/docs/FinishReason.md create mode 100644 src/together/generated/docs/FullTrainingType.md create mode 100644 src/together/generated/docs/HardwareApi.md create mode 100644 
src/together/generated/docs/HardwareAvailability.md create mode 100644 src/together/generated/docs/HardwareSpec.md create mode 100644 src/together/generated/docs/HardwareWithStatus.md create mode 100644 src/together/generated/docs/ImageResponse.md create mode 100644 src/together/generated/docs/ImageResponseDataInner.md create mode 100644 src/together/generated/docs/ImagesApi.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequest.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md create mode 100644 src/together/generated/docs/ImagesGenerationsPostRequestModel.md create mode 100644 src/together/generated/docs/LRScheduler.md create mode 100644 src/together/generated/docs/LinearLRSchedulerArgs.md create mode 100644 src/together/generated/docs/ListEndpoint.md create mode 100644 src/together/generated/docs/ListEndpoints200Response.md create mode 100644 src/together/generated/docs/ListHardware200Response.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md create mode 100644 src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md create mode 100644 src/together/generated/docs/LoRATrainingType.md create mode 100644 src/together/generated/docs/LogprobsPart.md create mode 100644 src/together/generated/docs/ModelInfo.md create mode 100644 src/together/generated/docs/ModelsApi.md create mode 100644 src/together/generated/docs/Pricing.md create mode 100644 src/together/generated/docs/PromptPartInner.md create mode 100644 src/together/generated/docs/RerankApi.md create mode 100644 src/together/generated/docs/RerankRequest.md create mode 100644 src/together/generated/docs/RerankRequestDocuments.md create mode 100644 src/together/generated/docs/RerankRequestModel.md create mode 100644 
src/together/generated/docs/RerankResponse.md create mode 100644 src/together/generated/docs/RerankResponseResultsInner.md create mode 100644 src/together/generated/docs/RerankResponseResultsInnerDocument.md create mode 100644 src/together/generated/docs/StreamSentinel.md create mode 100644 src/together/generated/docs/ToolChoice.md create mode 100644 src/together/generated/docs/ToolChoiceFunction.md create mode 100644 src/together/generated/docs/ToolsPart.md create mode 100644 src/together/generated/docs/ToolsPartFunction.md create mode 100644 src/together/generated/docs/UpdateEndpointRequest.md create mode 100644 src/together/generated/docs/UsageData.md create mode 100644 src/together/generated/exceptions.py create mode 100644 src/together/generated/models/__init__.py create mode 100644 src/together/generated/models/audio_speech_request.py create mode 100644 src/together/generated/models/audio_speech_request_model.py create mode 100644 src/together/generated/models/audio_speech_request_voice.py create mode 100644 src/together/generated/models/audio_speech_stream_chunk.py create mode 100644 src/together/generated/models/audio_speech_stream_event.py create mode 100644 src/together/generated/models/audio_speech_stream_response.py create mode 100644 src/together/generated/models/autoscaling.py create mode 100644 src/together/generated/models/chat_completion_assistant_message_param.py create mode 100644 src/together/generated/models/chat_completion_choice.py create mode 100644 src/together/generated/models/chat_completion_choice_delta.py create mode 100644 src/together/generated/models/chat_completion_choice_delta_function_call.py create mode 100644 src/together/generated/models/chat_completion_choices_data_inner.py create mode 100644 src/together/generated/models/chat_completion_choices_data_inner_logprobs.py create mode 100644 src/together/generated/models/chat_completion_chunk.py create mode 100644 src/together/generated/models/chat_completion_chunk_choices_inner.py 
create mode 100644 src/together/generated/models/chat_completion_event.py create mode 100644 src/together/generated/models/chat_completion_function_message_param.py create mode 100644 src/together/generated/models/chat_completion_message.py create mode 100644 src/together/generated/models/chat_completion_message_function_call.py create mode 100644 src/together/generated/models/chat_completion_message_param.py create mode 100644 src/together/generated/models/chat_completion_request.py create mode 100644 src/together/generated/models/chat_completion_request_function_call.py create mode 100644 src/together/generated/models/chat_completion_request_function_call_one_of.py create mode 100644 src/together/generated/models/chat_completion_request_messages_inner.py create mode 100644 src/together/generated/models/chat_completion_request_model.py create mode 100644 src/together/generated/models/chat_completion_request_response_format.py create mode 100644 src/together/generated/models/chat_completion_request_tool_choice.py create mode 100644 src/together/generated/models/chat_completion_response.py create mode 100644 src/together/generated/models/chat_completion_stream.py create mode 100644 src/together/generated/models/chat_completion_system_message_param.py create mode 100644 src/together/generated/models/chat_completion_token.py create mode 100644 src/together/generated/models/chat_completion_tool.py create mode 100644 src/together/generated/models/chat_completion_tool_function.py create mode 100644 src/together/generated/models/chat_completion_tool_message_param.py create mode 100644 src/together/generated/models/chat_completion_user_message_param.py create mode 100644 src/together/generated/models/completion_choice.py create mode 100644 src/together/generated/models/completion_choices_data_inner.py create mode 100644 src/together/generated/models/completion_chunk.py create mode 100644 src/together/generated/models/completion_chunk_usage.py create mode 100644 
src/together/generated/models/completion_event.py create mode 100644 src/together/generated/models/completion_request.py create mode 100644 src/together/generated/models/completion_request_model.py create mode 100644 src/together/generated/models/completion_request_safety_model.py create mode 100644 src/together/generated/models/completion_response.py create mode 100644 src/together/generated/models/completion_stream.py create mode 100644 src/together/generated/models/completion_token.py create mode 100644 src/together/generated/models/create_endpoint_request.py create mode 100644 src/together/generated/models/dedicated_endpoint.py create mode 100644 src/together/generated/models/embeddings_request.py create mode 100644 src/together/generated/models/embeddings_request_input.py create mode 100644 src/together/generated/models/embeddings_request_model.py create mode 100644 src/together/generated/models/embeddings_response.py create mode 100644 src/together/generated/models/embeddings_response_data_inner.py create mode 100644 src/together/generated/models/endpoint_pricing.py create mode 100644 src/together/generated/models/error_data.py create mode 100644 src/together/generated/models/error_data_error.py create mode 100644 src/together/generated/models/file_delete_response.py create mode 100644 src/together/generated/models/file_list.py create mode 100644 src/together/generated/models/file_object.py create mode 100644 src/together/generated/models/file_response.py create mode 100644 src/together/generated/models/fine_tune_event.py create mode 100644 src/together/generated/models/fine_tunes_post_request.py create mode 100644 src/together/generated/models/fine_tunes_post_request_train_on_inputs.py create mode 100644 src/together/generated/models/fine_tunes_post_request_training_type.py create mode 100644 src/together/generated/models/finetune_download_result.py create mode 100644 src/together/generated/models/finetune_event_levels.py create mode 100644 
src/together/generated/models/finetune_event_type.py create mode 100644 src/together/generated/models/finetune_job_status.py create mode 100644 src/together/generated/models/finetune_list.py create mode 100644 src/together/generated/models/finetune_list_events.py create mode 100644 src/together/generated/models/finetune_response.py create mode 100644 src/together/generated/models/finetune_response_train_on_inputs.py create mode 100644 src/together/generated/models/finish_reason.py create mode 100644 src/together/generated/models/full_training_type.py create mode 100644 src/together/generated/models/hardware_availability.py create mode 100644 src/together/generated/models/hardware_spec.py create mode 100644 src/together/generated/models/hardware_with_status.py create mode 100644 src/together/generated/models/image_response.py create mode 100644 src/together/generated/models/image_response_data_inner.py create mode 100644 src/together/generated/models/images_generations_post_request.py create mode 100644 src/together/generated/models/images_generations_post_request_image_loras_inner.py create mode 100644 src/together/generated/models/images_generations_post_request_model.py create mode 100644 src/together/generated/models/linear_lr_scheduler_args.py create mode 100644 src/together/generated/models/list_endpoint.py create mode 100644 src/together/generated/models/list_endpoints200_response.py create mode 100644 src/together/generated/models/list_hardware200_response.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of1.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of1_data_inner.py create mode 100644 src/together/generated/models/list_hardware200_response_one_of_data_inner.py create mode 100644 src/together/generated/models/lo_ra_training_type.py create mode 100644 src/together/generated/models/logprobs_part.py create 
mode 100644 src/together/generated/models/lr_scheduler.py create mode 100644 src/together/generated/models/model_info.py create mode 100644 src/together/generated/models/pricing.py create mode 100644 src/together/generated/models/prompt_part_inner.py create mode 100644 src/together/generated/models/rerank_request.py create mode 100644 src/together/generated/models/rerank_request_documents.py create mode 100644 src/together/generated/models/rerank_request_model.py create mode 100644 src/together/generated/models/rerank_response.py create mode 100644 src/together/generated/models/rerank_response_results_inner.py create mode 100644 src/together/generated/models/rerank_response_results_inner_document.py create mode 100644 src/together/generated/models/stream_sentinel.py create mode 100644 src/together/generated/models/tool_choice.py create mode 100644 src/together/generated/models/tool_choice_function.py create mode 100644 src/together/generated/models/tools_part.py create mode 100644 src/together/generated/models/tools_part_function.py create mode 100644 src/together/generated/models/update_endpoint_request.py create mode 100644 src/together/generated/models/usage_data.py create mode 100644 src/together/generated/rest.py create mode 100644 src/together/generated/test/__init__.py create mode 100644 src/together/generated/test/test_audio_api.py create mode 100644 src/together/generated/test/test_audio_speech_request.py create mode 100644 src/together/generated/test/test_audio_speech_request_model.py create mode 100644 src/together/generated/test/test_audio_speech_request_voice.py create mode 100644 src/together/generated/test/test_audio_speech_stream_chunk.py create mode 100644 src/together/generated/test/test_audio_speech_stream_event.py create mode 100644 src/together/generated/test/test_audio_speech_stream_response.py create mode 100644 src/together/generated/test/test_autoscaling.py create mode 100644 src/together/generated/test/test_chat_api.py create mode 100644 
src/together/generated/test/test_chat_completion_assistant_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_choice.py create mode 100644 src/together/generated/test/test_chat_completion_choice_delta.py create mode 100644 src/together/generated/test/test_chat_completion_choice_delta_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner.py create mode 100644 src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py create mode 100644 src/together/generated/test/test_chat_completion_chunk.py create mode 100644 src/together/generated/test/test_chat_completion_chunk_choices_inner.py create mode 100644 src/together/generated/test/test_chat_completion_event.py create mode 100644 src/together/generated/test/test_chat_completion_function_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_message.py create mode 100644 src/together/generated/test/test_chat_completion_message_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_request.py create mode 100644 src/together/generated/test/test_chat_completion_request_function_call.py create mode 100644 src/together/generated/test/test_chat_completion_request_function_call_one_of.py create mode 100644 src/together/generated/test/test_chat_completion_request_messages_inner.py create mode 100644 src/together/generated/test/test_chat_completion_request_model.py create mode 100644 src/together/generated/test/test_chat_completion_request_response_format.py create mode 100644 src/together/generated/test/test_chat_completion_request_tool_choice.py create mode 100644 src/together/generated/test/test_chat_completion_response.py create mode 100644 src/together/generated/test/test_chat_completion_stream.py create mode 100644 
src/together/generated/test/test_chat_completion_system_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_token.py create mode 100644 src/together/generated/test/test_chat_completion_tool.py create mode 100644 src/together/generated/test/test_chat_completion_tool_function.py create mode 100644 src/together/generated/test/test_chat_completion_tool_message_param.py create mode 100644 src/together/generated/test/test_chat_completion_user_message_param.py create mode 100644 src/together/generated/test/test_completion_api.py create mode 100644 src/together/generated/test/test_completion_choice.py create mode 100644 src/together/generated/test/test_completion_choices_data_inner.py create mode 100644 src/together/generated/test/test_completion_chunk.py create mode 100644 src/together/generated/test/test_completion_chunk_usage.py create mode 100644 src/together/generated/test/test_completion_event.py create mode 100644 src/together/generated/test/test_completion_request.py create mode 100644 src/together/generated/test/test_completion_request_model.py create mode 100644 src/together/generated/test/test_completion_request_safety_model.py create mode 100644 src/together/generated/test/test_completion_response.py create mode 100644 src/together/generated/test/test_completion_stream.py create mode 100644 src/together/generated/test/test_completion_token.py create mode 100644 src/together/generated/test/test_create_endpoint_request.py create mode 100644 src/together/generated/test/test_dedicated_endpoint.py create mode 100644 src/together/generated/test/test_embeddings_api.py create mode 100644 src/together/generated/test/test_embeddings_request.py create mode 100644 src/together/generated/test/test_embeddings_request_input.py create mode 100644 src/together/generated/test/test_embeddings_request_model.py create mode 100644 src/together/generated/test/test_embeddings_response.py create mode 100644 
src/together/generated/test/test_embeddings_response_data_inner.py create mode 100644 src/together/generated/test/test_endpoint_pricing.py create mode 100644 src/together/generated/test/test_endpoints_api.py create mode 100644 src/together/generated/test/test_error_data.py create mode 100644 src/together/generated/test/test_error_data_error.py create mode 100644 src/together/generated/test/test_file_delete_response.py create mode 100644 src/together/generated/test/test_file_list.py create mode 100644 src/together/generated/test/test_file_object.py create mode 100644 src/together/generated/test/test_file_response.py create mode 100644 src/together/generated/test/test_files_api.py create mode 100644 src/together/generated/test/test_fine_tune_event.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py create mode 100644 src/together/generated/test/test_fine_tunes_post_request_training_type.py create mode 100644 src/together/generated/test/test_fine_tuning_api.py create mode 100644 src/together/generated/test/test_finetune_download_result.py create mode 100644 src/together/generated/test/test_finetune_event_levels.py create mode 100644 src/together/generated/test/test_finetune_event_type.py create mode 100644 src/together/generated/test/test_finetune_job_status.py create mode 100644 src/together/generated/test/test_finetune_list.py create mode 100644 src/together/generated/test/test_finetune_list_events.py create mode 100644 src/together/generated/test/test_finetune_response.py create mode 100644 src/together/generated/test/test_finetune_response_train_on_inputs.py create mode 100644 src/together/generated/test/test_finish_reason.py create mode 100644 src/together/generated/test/test_full_training_type.py create mode 100644 src/together/generated/test/test_hardware_api.py create mode 100644 src/together/generated/test/test_hardware_availability.py 
create mode 100644 src/together/generated/test/test_hardware_spec.py create mode 100644 src/together/generated/test/test_hardware_with_status.py create mode 100644 src/together/generated/test/test_image_response.py create mode 100644 src/together/generated/test/test_image_response_data_inner.py create mode 100644 src/together/generated/test/test_images_api.py create mode 100644 src/together/generated/test/test_images_generations_post_request.py create mode 100644 src/together/generated/test/test_images_generations_post_request_image_loras_inner.py create mode 100644 src/together/generated/test/test_images_generations_post_request_model.py create mode 100644 src/together/generated/test/test_linear_lr_scheduler_args.py create mode 100644 src/together/generated/test/test_list_endpoint.py create mode 100644 src/together/generated/test/test_list_endpoints200_response.py create mode 100644 src/together/generated/test/test_list_hardware200_response.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py create mode 100644 src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py create mode 100644 src/together/generated/test/test_lo_ra_training_type.py create mode 100644 src/together/generated/test/test_logprobs_part.py create mode 100644 src/together/generated/test/test_lr_scheduler.py create mode 100644 src/together/generated/test/test_model_info.py create mode 100644 src/together/generated/test/test_models_api.py create mode 100644 src/together/generated/test/test_pricing.py create mode 100644 src/together/generated/test/test_prompt_part_inner.py create mode 100644 src/together/generated/test/test_rerank_api.py create mode 100644 src/together/generated/test/test_rerank_request.py create mode 100644 
src/together/generated/test/test_rerank_request_documents.py create mode 100644 src/together/generated/test/test_rerank_request_model.py create mode 100644 src/together/generated/test/test_rerank_response.py create mode 100644 src/together/generated/test/test_rerank_response_results_inner.py create mode 100644 src/together/generated/test/test_rerank_response_results_inner_document.py create mode 100644 src/together/generated/test/test_stream_sentinel.py create mode 100644 src/together/generated/test/test_tool_choice.py create mode 100644 src/together/generated/test/test_tool_choice_function.py create mode 100644 src/together/generated/test/test_tools_part.py create mode 100644 src/together/generated/test/test_tools_part_function.py create mode 100644 src/together/generated/test/test_update_endpoint_request.py create mode 100644 src/together/generated/test/test_usage_data.py diff --git a/.github/workflows/_integration_tests.yml b/.github/workflows/_integration_tests.yml index f20d3a68..5b6813b9 100644 --- a/.github/workflows/_integration_tests.yml +++ b/.github/workflows/_integration_tests.yml @@ -39,10 +39,6 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core - - name: Generate OpenAPI client - run: | - make generate-client-from-existing-spec - - name: Install dependencies shell: bash run: poetry install --with quality,tests diff --git a/.github/workflows/_tests.yml b/.github/workflows/_tests.yml index fd7fcbae..62e42844 100644 --- a/.github/workflows/_tests.yml +++ b/.github/workflows/_tests.yml @@ -39,10 +39,6 @@ jobs: poetry-version: ${{ env.POETRY_VERSION }} cache-key: core - - name: Generate OpenAPI client - run: | - make generate-client-from-existing-spec - - name: Install dependencies shell: bash run: poetry install --with quality,tests diff --git a/.gitignore b/.gitignore index c94c15ff..b6e47617 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,6 @@ share/python-wheels/ *.egg MANIFEST -# OpenAPI Generator Ignore -src/together/generated/ 
- # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 0c0a3427..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -recursive-include src/together/generated * diff --git a/pyproject.toml b/pyproject.toml index 470100e0..6301660b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,3 @@ -include = [ - "src/together/generated/**" -] - [build-system] requires = [ "poetry", diff --git a/src/together/generated/__init__.py b/src/together/generated/__init__.py new file mode 100644 index 00000000..cf250519 --- /dev/null +++ b/src/together/generated/__init__.py @@ -0,0 +1,224 @@ +# coding: utf-8 + +# flake8: noqa + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +__version__ = "1.0.0" + +# import apis into sdk package +from together.generated.api.audio_api import AudioApi +from together.generated.api.chat_api import ChatApi +from together.generated.api.completion_api import CompletionApi +from together.generated.api.embeddings_api import EmbeddingsApi +from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api.files_api import FilesApi +from together.generated.api.fine_tuning_api import FineTuningApi +from together.generated.api.hardware_api import HardwareApi +from together.generated.api.images_api import ImagesApi +from together.generated.api.models_api import ModelsApi +from together.generated.api.rerank_api import RerankApi + +# import ApiClient +from together.generated.api_response import ApiResponse +from together.generated.api_client import ApiClient +from together.generated.configuration import Configuration +from together.generated.exceptions import OpenApiException +from together.generated.exceptions import ApiTypeError +from together.generated.exceptions import ApiValueError +from together.generated.exceptions import ApiKeyError +from together.generated.exceptions import ApiAttributeError +from together.generated.exceptions import ApiException + +# import models into sdk package +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.chat_completion_assistant_message_param import ( + 
ChatCompletionAssistantMessageParam, +) +from together.generated.models.chat_completion_choice import ChatCompletionChoice +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) +from 
together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.models.chat_completion_stream import ChatCompletionStream +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) +from together.generated.models.chat_completion_token import ChatCompletionToken +from together.generated.models.chat_completion_tool import ChatCompletionTool +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.completion_chunk import CompletionChunk +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from together.generated.models.completion_event import CompletionEvent +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from together.generated.models.completion_response import CompletionResponse +from together.generated.models.completion_stream import CompletionStream +from together.generated.models.completion_token import CompletionToken +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.embeddings_request 
import EmbeddingsRequest +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.error_data import ErrorData +from together.generated.models.error_data_error import ErrorDataError +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from together.generated.models.finish_reason import FinishReason +from 
together.generated.models.full_training_type import FullTrainingType +from together.generated.models.hardware_availability import HardwareAvailability +from together.generated.models.hardware_spec import HardwareSpec +from together.generated.models.hardware_with_status import HardwareWithStatus +from together.generated.models.image_response import ImageResponse +from together.generated.models.image_response_data_inner import ImageResponseDataInner +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from together.generated.models.lr_scheduler import LRScheduler +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs +from together.generated.models.list_endpoint import ListEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.list_hardware200_response import ListHardware200Response +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from together.generated.models.lo_ra_training_type import LoRATrainingType +from together.generated.models.logprobs_part import LogprobsPart +from together.generated.models.model_info import ModelInfo +from together.generated.models.pricing import Pricing +from 
together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_request_documents import RerankRequestDocuments +from together.generated.models.rerank_request_model import RerankRequestModel +from together.generated.models.rerank_response import RerankResponse +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from together.generated.models.stream_sentinel import StreamSentinel +from together.generated.models.tool_choice import ToolChoice +from together.generated.models.tool_choice_function import ToolChoiceFunction +from together.generated.models.tools_part import ToolsPart +from together.generated.models.tools_part_function import ToolsPartFunction +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/api/__init__.py b/src/together/generated/api/__init__.py new file mode 100644 index 00000000..50f8b438 --- /dev/null +++ b/src/together/generated/api/__init__.py @@ -0,0 +1,14 @@ +# flake8: noqa + +# import apis into api package +from together.generated.api.audio_api import AudioApi +from together.generated.api.chat_api import ChatApi +from together.generated.api.completion_api import CompletionApi +from together.generated.api.embeddings_api import EmbeddingsApi +from together.generated.api.endpoints_api import EndpointsApi +from together.generated.api.files_api import FilesApi +from together.generated.api.fine_tuning_api import FineTuningApi +from together.generated.api.hardware_api import HardwareApi +from together.generated.api.images_api import ImagesApi +from together.generated.api.models_api import ModelsApi +from together.generated.api.rerank_api import 
RerankApi diff --git a/src/together/generated/api/audio_api.py b/src/together/generated/api/audio_api.py new file mode 100644 index 00000000..f242af24 --- /dev/null +++ b/src/together/generated/api/audio_api.py @@ -0,0 +1,302 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import StrictBytes, StrictStr +from typing import Optional, Tuple, Union +from together.generated.models.audio_speech_request import AudioSpeechRequest + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class AudioApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def audio_speech( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> bytearray: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def audio_speech_with_http_info( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[bytearray]: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def audio_speech_without_preload_content( + self, + audio_speech_request: Optional[AudioSpeechRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create audio generation request + + Generate audio from input text + + :param audio_speech_request: + :type audio_speech_request: AudioSpeechRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._audio_speech_serialize( + audio_speech_request=audio_speech_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "bytearray", + "400": "ErrorData", + "429": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _audio_speech_serialize( + self, + audio_speech_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if audio_speech_request is not None: + _body_params = audio_speech_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + [ + "application/octet-stream", + "audio/wav", + "audio/mpeg", + 
"text/event-stream", + "application/json", + ] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/audio/speech", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/chat_api.py b/src/together/generated/api/chat_api.py new file mode 100644 index 00000000..9c4bb818 --- /dev/null +++ b/src/together/generated/api/chat_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_response import ChatCompletionResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ChatApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def chat_completions( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ChatCompletionResponse: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def chat_completions_with_http_info( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, 
+ ) -> ApiResponse[ChatCompletionResponse]: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def chat_completions_without_preload_content( + self, + chat_completion_request: Optional[ChatCompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create chat completion + + Query a chat model. + + :param chat_completion_request: + :type chat_completion_request: ChatCompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._chat_completions_serialize( + chat_completion_request=chat_completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ChatCompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _chat_completions_serialize( + self, + chat_completion_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if chat_completion_request is not None: + _body_params = chat_completion_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json", 
"text/event-stream"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/chat/completions", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/completion_api.py b/src/together/generated/api/completion_api.py new file mode 100644 index 00000000..73f5e7fb --- /dev/null +++ b/src/together/generated/api/completion_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_response import CompletionResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class CompletionApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def completions( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> CompletionResponse: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def completions_with_http_info( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[CompletionResponse]: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def completions_without_preload_content( + self, + completion_request: Optional[CompletionRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: 
Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create completion + + Query a language, code, or image model. + + :param completion_request: + :type completion_request: CompletionRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._completions_serialize( + completion_request=completion_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "CompletionResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _completions_serialize( + self, + completion_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if completion_request is not None: + _body_params = completion_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json", "text/event-stream"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + 
resource_path="/completions", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/embeddings_api.py b/src/together/generated/api/embeddings_api.py new file mode 100644 index 00000000..e2dea123 --- /dev/null +++ b/src/together/generated/api/embeddings_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_response import EmbeddingsResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class EmbeddingsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def embeddings( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> EmbeddingsResponse: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def embeddings_with_http_info( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[EmbeddingsResponse]: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def embeddings_without_preload_content( + self, + embeddings_request: Optional[EmbeddingsRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create embedding + + Query an embedding model for a given string of text. + + :param embeddings_request: + :type embeddings_request: EmbeddingsRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._embeddings_serialize( + embeddings_request=embeddings_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "EmbeddingsResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _embeddings_serialize( + self, + embeddings_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # 
process the header parameters + # process the form parameters + # process the body parameter + if embeddings_request is not None: + _body_params = embeddings_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/embeddings", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/endpoints_api.py b/src/together/generated/api/endpoints_api.py new file mode 100644 index 00000000..70c8824a --- /dev/null +++ b/src/together/generated/api/endpoints_api.py @@ -0,0 +1,1354 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictStr, field_validator +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class EndpointsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def create_endpoint( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
+ + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object.
+ """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def create_endpoint_with_http_info( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. + + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def create_endpoint_without_preload_content( + self, + create_endpoint_request: CreateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create a dedicated endpoint, it will start automatically + + Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. 
+ + :param create_endpoint_request: (required) + :type create_endpoint_request: CreateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object.
+ """ # noqa: E501 + + _param = self._create_endpoint_serialize( + create_endpoint_request=create_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _create_endpoint_serialize( + self, + create_endpoint_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if create_endpoint_request is not None: + _body_params = create_endpoint_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/endpoints", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def delete_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> None: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def delete_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[None]: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def delete_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to delete") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Delete endpoint + + Permanently deletes an endpoint. This action cannot be undone. + + :param endpoint_id: The ID of the endpoint to delete (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._delete_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "204": None, + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _delete_endpoint_serialize( + self, + endpoint_id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body 
parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def get_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def get_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def get_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to retrieve") + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, 
+ _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Get endpoint by ID + + Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + + :param endpoint_id: The ID of the endpoint to retrieve (required) + :type endpoint_id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._get_endpoint_serialize( + endpoint_id=endpoint_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _get_endpoint_serialize( + self, + endpoint_id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def list_endpoints( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter 
endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ListEndpoints200Response: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def list_endpoints_with_http_info( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ListEndpoints200Response]: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def list_endpoints_without_preload_content( + self, + type: Annotated[ + Optional[StrictStr], Field(description="Filter endpoints by type") + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all endpoints, can be filtered by type + + Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). + + :param type: Filter endpoints by type + :type type: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_endpoints_serialize( + type=type, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListEndpoints200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _list_endpoints_serialize( + self, + type, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if type is not None: + + _query_params.append(("type", type)) + + # process the header parameters + # process the form 
parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/endpoints", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def update_endpoint( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> DedicatedEndpoint: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def update_endpoint_with_http_info( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: 
Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[DedicatedEndpoint]: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def update_endpoint_without_preload_content( + self, + endpoint_id: Annotated[ + StrictStr, Field(description="The ID of the endpoint to update") + ], + update_endpoint_request: UpdateEndpointRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Update endpoint, this can also be used to start or stop a dedicated endpoint + + Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + + :param endpoint_id: The ID of the endpoint to update (required) + :type endpoint_id: str + :param update_endpoint_request: (required) + :type update_endpoint_request: UpdateEndpointRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._update_endpoint_serialize( + endpoint_id=endpoint_id, + update_endpoint_request=update_endpoint_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "DedicatedEndpoint", + "403": "ErrorData", + "404": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _update_endpoint_serialize( + self, + endpoint_id, + update_endpoint_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if endpoint_id is not None: + _path_params["endpointId"] = endpoint_id + # process the query parameters 
+ # process the header parameters + # process the form parameters + # process the body parameter + if update_endpoint_request is not None: + _body_params = update_endpoint_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="PATCH", + resource_path="/endpoints/{endpointId}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/files_api.py b/src/together/generated/api/files_api.py new file mode 100644 index 00000000..1981fc9e --- /dev/null +++ b/src/together/generated/api/files_api.py @@ -0,0 +1,996 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import StrictStr +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class FilesApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def files_get( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileList: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_get_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileList]: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_get_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all files + + List the metadata for all uploaded data files. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_get_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = 
self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_content_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileObject: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_content_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileObject]: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_content_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Get file contents + + Get the contents of a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_content_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileObject", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_content_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files/{id}/content", + path_params=_path_params, + 
query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_delete( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileDeleteResponse: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_delete_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileDeleteResponse]: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_delete_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Delete a file + + Delete a previously uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_delete_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileDeleteResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_delete_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/files/{id}", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def files_id_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FileResponse: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def files_id_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FileResponse]: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def files_id_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List file + + List the metadata for a single uploaded data file. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._files_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FileResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _files_id_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/files/{id}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + 
body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/fine_tuning_api.py b/src/together/generated/api/fine_tuning_api.py new file mode 100644 index 00000000..d25eb662 --- /dev/null +++ b/src/together/generated/api/fine_tuning_api.py @@ -0,0 +1,1630 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictInt, StrictStr, field_validator +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class FineTuningApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def fine_tunes_get( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneList: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_get_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneList]: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_get_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all jobs + + List the metadata for all fine-tuning jobs. + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneList", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_get_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/fine-tunes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_cancel_post( + 
self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_cancel_post_with_http_info( + self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_cancel_post_without_preload_content( + self, + id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to cancel. A string that starts with `ft-`." + ), + ], + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Cancel job + + Cancel a currently running fine-tuning job. + + :param id: Fine-tune ID to cancel. A string that starts with `ft-`. (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_cancel_post_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_cancel_post_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" 
not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/fine-tunes/{id}/cancel", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_events_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneListEvents: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_events_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneListEvents]: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_events_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List job events + + List the events for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_events_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneListEvents", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_events_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + 
_header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/fine-tunes/{id}/events", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_id_get( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_id_get_with_http_info( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_id_get_without_preload_content( + self, + id: StrictStr, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List job + + List the metadata for a single fine-tuning job. + + :param id: (required) + :type id: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_id_get_serialize( + id=id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_id_get_serialize( + self, + id, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + if id is not None: + _path_params["id"] = id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + 
resource_path="/fine-tunes/{id}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def fine_tunes_post( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneResponse: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def fine_tunes_post_with_http_info( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneResponse]: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. 
+ :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def fine_tunes_post_without_preload_content( + self, + fine_tunes_post_request: FineTunesPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create job + + Use a model to create a fine-tuning job. + + :param fine_tunes_post_request: (required) + :type fine_tunes_post_request: FineTunesPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._fine_tunes_post_serialize( + fine_tunes_post_request=fine_tunes_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _fine_tunes_post_serialize( + self, + fine_tunes_post_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if fine_tunes_post_request is not None: + _body_params = fine_tunes_post_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if 
_content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/fine-tunes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + @validate_call + async def finetune_download_get( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> FinetuneDownloadResult: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. 
+
+        :param ft_id: Fine-tune ID to download. A string that starts with `ft-`. (required)
+        :type ft_id: str
+        :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set.
+        :type checkpoint_step: int
+        :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set.
+        :type checkpoint: str
+        :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`.
+        :type output: str
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :type _request_timeout: int, tuple(int, int), optional
+        :param _request_auth: set to override the auth_settings for a single
+                              request; this effectively ignores the
+                              authentication in the spec for a single request.
+        :type _request_auth: dict, optional
+        :param _content_type: force content-type for the request.
+        :type _content_type: str, optional
+        :param _headers: set to override the headers for a single
+                         request; this effectively ignores the headers
+                         in the spec for a single request.
+        :type _headers: dict, optional
+        :param _host_index: set to override the host_index for a single
+                            request; this effectively ignores the host_index
+                            in the spec for a single request.
+        :type _host_index: int, optional
+        :return: Returns the result object.
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def finetune_download_get_with_http_info( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[FinetuneDownloadResult]: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. + + :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required)
+        :type ft_id: str
+        :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set.
+        :type checkpoint_step: int
+        :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set.
+        :type checkpoint: str
+        :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`.
+        :type output: str
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :type _request_timeout: int, tuple(int, int), optional
+        :param _request_auth: set to override the auth_settings for a single
+                              request; this effectively ignores the
+                              authentication in the spec for a single request.
+        :type _request_auth: dict, optional
+        :param _content_type: force content-type for the request.
+        :type _content_type: str, optional
+        :param _headers: set to override the headers for a single
+                         request; this effectively ignores the headers
+                         in the spec for a single request.
+        :type _headers: dict, optional
+        :param _host_index: set to override the host_index for a single
+                            request; this effectively ignores the host_index
+                            in the spec for a single request.
+        :type _host_index: int, optional
+        :return: Returns the result object.
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def finetune_download_get_without_preload_content( + self, + ft_id: Annotated[ + StrictStr, + Field( + description="Fine-tune ID to download. A string that starts with `ft-`." + ), + ], + checkpoint_step: Annotated[ + Optional[StrictInt], + Field( + description="Specifies step number for checkpoint to download. Ignores `checkpoint` value if set." + ), + ] = None, + checkpoint: Annotated[ + Optional[StrictStr], + Field( + description="Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set." + ), + ] = None, + output: Annotated[ + Optional[StrictStr], + Field( + description="Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`." + ), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Download model + + Download a compressed fine-tuned model or checkpoint to local disk. + + :param ft_id: Fine-tune ID to download. 
A string that starts with `ft-`. (required)
+        :type ft_id: str
+        :param checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if set.
+        :type checkpoint_step: int
+        :param checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set.
+        :type checkpoint: str
+        :param output: Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`.
+        :type output: str
+        :param _request_timeout: timeout setting for this request. If one
+                                 number provided, it will be total request
+                                 timeout. It can also be a pair (tuple) of
+                                 (connection, read) timeouts.
+        :type _request_timeout: int, tuple(int, int), optional
+        :param _request_auth: set to override the auth_settings for a single
+                              request; this effectively ignores the
+                              authentication in the spec for a single request.
+        :type _request_auth: dict, optional
+        :param _content_type: force content-type for the request.
+        :type _content_type: str, optional
+        :param _headers: set to override the headers for a single
+                         request; this effectively ignores the headers
+                         in the spec for a single request.
+        :type _headers: dict, optional
+        :param _host_index: set to override the host_index for a single
+                            request; this effectively ignores the host_index
+                            in the spec for a single request.
+        :type _host_index: int, optional
+        :return: Returns the result object.
+ """ # noqa: E501 + + _param = self._finetune_download_get_serialize( + ft_id=ft_id, + checkpoint_step=checkpoint_step, + checkpoint=checkpoint, + output=output, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "FinetuneDownloadResult", + "400": None, + "404": None, + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _finetune_download_get_serialize( + self, + ft_id, + checkpoint_step, + checkpoint, + output, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if ft_id is not None: + + _query_params.append(("ft_id", ft_id)) + + if checkpoint_step is not None: + + _query_params.append(("checkpoint_step", checkpoint_step)) + + if checkpoint is not None: + + _query_params.append(("checkpoint", checkpoint)) + + if output is not None: + + _query_params.append(("output", output)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/finetune/download", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/hardware_api.py b/src/together/generated/api/hardware_api.py new file mode 100644 index 00000000..0f8e78fd --- /dev/null +++ b/src/together/generated/api/hardware_api.py @@ -0,0 +1,298 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from pydantic import Field, StrictStr +from typing import Optional +from typing_extensions import Annotated +from together.generated.models.list_hardware200_response import ListHardware200Response + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class HardwareApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def list_hardware( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ListHardware200Response: + """List available hardware configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def list_hardware_with_http_info( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ListHardware200Response]: + """List available hardware configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def list_hardware_without_preload_content( + self, + model: Annotated[ + Optional[StrictStr], + Field(description="Filter hardware configurations by model compatibility"), + ] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List available hardware 
configurations + + Returns a list of available hardware configurations for deploying models. When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + + :param model: Filter hardware configurations by model compatibility + :type model: str + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._list_hardware_serialize( + model=model, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ListHardware200Response", + "403": "ErrorData", + "500": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _list_hardware_serialize( + self, + model, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + if model is not None: + + _query_params.append(("model", model)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/hardware", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/images_api.py b/src/together/generated/api/images_api.py new file mode 100644 index 00000000..a61365e8 --- /dev/null +++ 
b/src/together/generated/api/images_api.py @@ -0,0 +1,291 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from together.generated.models.image_response import ImageResponse +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ImagesApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def images_generations_post( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ImageResponse: + """Create image + + Use an image model to generate an image for a given prompt. 
+ + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def images_generations_post_with_http_info( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[ImageResponse]: + """Create image + + Use an image model to generate an image for a given prompt. + + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def images_generations_post_without_preload_content( + self, + images_generations_post_request: ImagesGenerationsPostRequest, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create image + + Use an image model to generate an image for a given prompt. + + :param images_generations_post_request: (required) + :type images_generations_post_request: ImagesGenerationsPostRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._images_generations_post_serialize( + images_generations_post_request=images_generations_post_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "ImageResponse", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _images_generations_post_serialize( + self, + images_generations_post_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if images_generations_post_request is not 
None: + _body_params = images_generations_post_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/images/generations", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/models_api.py b/src/together/generated/api/models_api.py new file mode 100644 index 00000000..bb2d0e7b --- /dev/null +++ b/src/together/generated/api/models_api.py @@ -0,0 +1,279 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import List +from together.generated.models.model_info import ModelInfo + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class ModelsApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def models( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> List[ModelInfo]: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def models_with_http_info( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[List[ModelInfo]]: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def models_without_preload_content( + self, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """List all models + + Lists all of Together's open-source models + + :param _request_timeout: timeout setting for this request. 
If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._models_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "List[ModelInfo]", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _models_serialize( + self, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # 
process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="GET", + resource_path="/models", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api/rerank_api.py b/src/together/generated/api/rerank_api.py new file mode 100644 index 00000000..2b4f99ae --- /dev/null +++ b/src/together/generated/api/rerank_api.py @@ -0,0 +1,308 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + +import warnings +from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt +from typing import Any, Dict, List, Optional, Tuple, Union +from typing_extensions import Annotated + +from typing import Optional +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_response import RerankResponse + +from together.generated.api_client import ApiClient, RequestSerialized +from together.generated.api_response import ApiResponse +from together.generated.rest import RESTResponseType + + +class RerankApi: + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + @validate_call + async def rerank( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RerankResponse: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + @validate_call + async def rerank_with_http_info( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> ApiResponse[RerankResponse]: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. 
+ :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + await response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + @validate_call + async def rerank_without_preload_content( + self, + rerank_request: Optional[RerankRequest] = None, + _request_timeout: Union[ + None, + Annotated[StrictFloat, Field(gt=0)], + Tuple[ + Annotated[StrictFloat, Field(gt=0)], Annotated[StrictFloat, Field(gt=0)] + ], + ] = None, + _request_auth: Optional[Dict[StrictStr, Any]] = None, + _content_type: Optional[StrictStr] = None, + _headers: Optional[Dict[StrictStr, Any]] = None, + _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0, + ) -> RESTResponseType: + """Create a rerank request + + Query a reranker model + + :param rerank_request: + :type rerank_request: RerankRequest + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the + authentication in the spec for a single request. + :type _request_auth: dict, optional + :param _content_type: force content-type for the request. + :type _content_type: str, Optional + :param _headers: set to override the headers for a single + request; this effectively ignores the headers + in the spec for a single request. + :type _headers: dict, optional + :param _host_index: set to override the host_index for a single + request; this effectively ignores the host_index + in the spec for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ # noqa: E501 + + _param = self._rerank_serialize( + rerank_request=rerank_request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RerankResponse", + "400": "ErrorData", + "401": "ErrorData", + "404": "ErrorData", + "429": "ErrorData", + "503": "ErrorData", + "504": "ErrorData", + } + response_data = await self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _rerank_serialize( + self, + rerank_request, + _request_auth, + _content_type, + _headers, + _host_index, + ) -> RequestSerialized: + + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Optional[bytes] = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter 
+ if rerank_request is not None: + _body_params = rerank_request + + # set the HTTP header `Accept` + if "Accept" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = ["bearerAuth"] + + return self.api_client.param_serialize( + method="POST", + resource_path="/rerank", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/src/together/generated/api_client.py b/src/together/generated/api_client.py new file mode 100644 index 00000000..bfdac8dc --- /dev/null +++ b/src/together/generated/api_client.py @@ -0,0 +1,758 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import datetime +from dateutil.parser import parse +from enum import Enum +import decimal +import json +import mimetypes +import os +import re +import tempfile + +from urllib.parse import quote +from typing import Tuple, Optional, List, Dict, Union +from pydantic import SecretStr + +from together.generated.configuration import Configuration +from together.generated.api_response import ApiResponse, T as ApiResponseT +import together.generated.models +from together.generated import rest +from together.generated.exceptions import ( + ApiValueError, + ApiException, + BadRequestException, + UnauthorizedException, + ForbiddenException, + NotFoundException, + ServiceException, +) + +RequestSerialized = Tuple[str, str, Dict[str, str], Optional[str], List[str]] + + +class ApiClient: + """Generic API client for OpenAPI client library builds. + + OpenAPI generic API client. This client handles the client- + server communication, and is invariant across implementations. Specifics of + the methods and models for each application are generated from the OpenAPI + templates. + + :param configuration: .Configuration object for this client + :param header_name: a header to pass when making calls to the API. + :param header_value: a header value to pass when making calls to + the API. + :param cookie: a cookie to include in the header when making calls + to the API + """ + + PRIMITIVE_TYPES = (float, bool, bytes, str, int) + NATIVE_TYPES_MAPPING = { + "int": int, + "long": int, # TODO remove as only py3 is supported? 
+ "float": float, + "str": str, + "bool": bool, + "date": datetime.date, + "datetime": datetime.datetime, + "decimal": decimal.Decimal, + "object": object, + } + _pool = None + + def __init__( + self, configuration=None, header_name=None, header_value=None, cookie=None + ) -> None: + # use default configuration if none is provided + if configuration is None: + configuration = Configuration.get_default() + self.configuration = configuration + + self.rest_client = rest.RESTClientObject(configuration) + self.default_headers = {} + if header_name is not None: + self.default_headers[header_name] = header_value + self.cookie = cookie + # Set default User-Agent. + self.user_agent = "OpenAPI-Generator/1.0.0/python" + self.client_side_validation = configuration.client_side_validation + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self.close() + + async def close(self): + await self.rest_client.close() + + @property + def user_agent(self): + """User agent for this API client""" + return self.default_headers["User-Agent"] + + @user_agent.setter + def user_agent(self, value): + self.default_headers["User-Agent"] = value + + def set_default_header(self, header_name, header_value): + self.default_headers[header_name] = header_value + + _default = None + + @classmethod + def get_default(cls): + """Return new instance of ApiClient. + + This method returns newly created, based on default constructor, + object of ApiClient class or returns a copy of default + ApiClient. + + :return: The ApiClient object. + """ + if cls._default is None: + cls._default = ApiClient() + return cls._default + + @classmethod + def set_default(cls, default): + """Set default instance of ApiClient. + + It stores default ApiClient. + + :param default: object of ApiClient. 
+ """ + cls._default = default + + def param_serialize( + self, + method, + resource_path, + path_params=None, + query_params=None, + header_params=None, + body=None, + post_params=None, + files=None, + auth_settings=None, + collection_formats=None, + _host=None, + _request_auth=None, + ) -> RequestSerialized: + """Builds the HTTP request params needed by the request. + :param method: Method to call. + :param resource_path: Path to method endpoint. + :param path_params: Path parameters in the url. + :param query_params: Query parameters in the url. + :param header_params: Header parameters to be + placed in the request header. + :param body: Request body. + :param post_params dict: Request post form parameters, + for `application/x-www-form-urlencoded`, `multipart/form-data`. + :param auth_settings list: Auth Settings names for the request. + :param files dict: key -> filename, value -> filepath, + for `multipart/form-data`. + :param collection_formats: dict of collection formats for path, query, + header, and post parameters. + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the authentication + in the spec for a single request. 
+ :return: tuple of form (path, http_method, query_params, header_params, + body, post_params, files) + """ + + config = self.configuration + + # header parameters + header_params = header_params or {} + header_params.update(self.default_headers) + if self.cookie: + header_params["Cookie"] = self.cookie + if header_params: + header_params = self.sanitize_for_serialization(header_params) + header_params = dict( + self.parameters_to_tuples(header_params, collection_formats) + ) + + # path parameters + if path_params: + path_params = self.sanitize_for_serialization(path_params) + path_params = self.parameters_to_tuples(path_params, collection_formats) + for k, v in path_params: + # specified safe chars, encode everything + resource_path = resource_path.replace( + "{%s}" % k, quote(str(v), safe=config.safe_chars_for_path_param) + ) + + # post parameters + if post_params or files: + post_params = post_params if post_params else [] + post_params = self.sanitize_for_serialization(post_params) + post_params = self.parameters_to_tuples(post_params, collection_formats) + if files: + post_params.extend(self.files_parameters(files)) + + # auth setting + self.update_params_for_auth( + header_params, + query_params, + auth_settings, + resource_path, + method, + body, + request_auth=_request_auth, + ) + + # body + if body: + body = self.sanitize_for_serialization(body) + + # request url + if _host is None or self.configuration.ignore_operation_servers: + url = self.configuration.host + resource_path + else: + # use server/host defined in path or operation instead + url = _host + resource_path + + # query parameters + if query_params: + query_params = self.sanitize_for_serialization(query_params) + url_query = self.parameters_to_url_query(query_params, collection_formats) + url += "?" 
+ url_query
+
+        return method, url, header_params, body, post_params
+
+    async def call_api(
+        self,
+        method,
+        url,
+        header_params=None,
+        body=None,
+        post_params=None,
+        _request_timeout=None,
+    ) -> rest.RESTResponse:
+        """Makes the HTTP request (asynchronous)
+        :param method: Method to call.
+        :param url: Path to method endpoint.
+        :param header_params: Header parameters to be
+            placed in the request header.
+        :param body: Request body.
+        :param post_params dict: Request post form parameters,
+            for `application/x-www-form-urlencoded`, `multipart/form-data`.
+        :param _request_timeout: timeout setting for this request.
+        :return: RESTResponse
+        """
+
+        try:
+            # perform request and return response
+            response_data = await self.rest_client.request(
+                method,
+                url,
+                headers=header_params,
+                body=body,
+                post_params=post_params,
+                _request_timeout=_request_timeout,
+            )
+
+        except ApiException as e:
+            raise e
+
+        return response_data
+
+    def response_deserialize(
+        self,
+        response_data: rest.RESTResponse,
+        response_types_map: Optional[Dict[str, ApiResponseT]] = None,
+    ) -> ApiResponse[ApiResponseT]:
+        """Deserializes response into an object.
+        :param response_data: RESTResponse object to be deserialized.
+        :param response_types_map: dict of response types.
+        :return: ApiResponse
+        """
+
+        msg = "RESTResponse.read() must be called before passing it to response_deserialize()"
+        assert response_data.data is not None, msg
+
+        response_type = response_types_map.get(str(response_data.status), None)
+        if (
+            not response_type
+            and isinstance(response_data.status, int)
+            and 100 <= response_data.status <= 599
+        ):
+            # if not found, look for '1XX', '2XX', etc.
+ response_type = response_types_map.get( + str(response_data.status)[0] + "XX", None + ) + + # deserialize response data + response_text = None + return_data = None + try: + if response_type == "bytearray": + return_data = response_data.data + elif response_type == "file": + return_data = self.__deserialize_file(response_data) + elif response_type is not None: + match = None + content_type = response_data.getheader("content-type") + if content_type is not None: + match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type) + encoding = match.group(1) if match else "utf-8" + response_text = response_data.data.decode(encoding) + return_data = self.deserialize( + response_text, response_type, content_type + ) + finally: + if not 200 <= response_data.status <= 299: + raise ApiException.from_response( + http_resp=response_data, + body=response_text, + data=return_data, + ) + + return ApiResponse( + status_code=response_data.status, + data=return_data, + headers=response_data.getheaders(), + raw_data=response_data.data, + ) + + def sanitize_for_serialization(self, obj): + """Builds a JSON POST object. + + If obj is None, return None. + If obj is SecretStr, return obj.get_secret_value() + If obj is str, int, long, float, bool, return directly. + If obj is datetime.datetime, datetime.date + convert to string in iso8601 format. + If obj is decimal.Decimal return string representation. + If obj is list, sanitize each element in the list. + If obj is dict, return the dict. + If obj is OpenAPI model, return the properties dict. + + :param obj: The data to serialize. + :return: The serialized form of data. 
+ """ + if obj is None: + return None + elif isinstance(obj, Enum): + return obj.value + elif isinstance(obj, SecretStr): + return obj.get_secret_value() + elif isinstance(obj, self.PRIMITIVE_TYPES): + return obj + elif isinstance(obj, list): + return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] + elif isinstance(obj, tuple): + return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) + elif isinstance(obj, (datetime.datetime, datetime.date)): + return obj.isoformat() + elif isinstance(obj, decimal.Decimal): + return str(obj) + + elif isinstance(obj, dict): + obj_dict = obj + else: + # Convert model obj to dict except + # attributes `openapi_types`, `attribute_map` + # and attributes which value is not None. + # Convert attribute name to json key in + # model definition for request. + if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")): + obj_dict = obj.to_dict() + else: + obj_dict = obj.__dict__ + + return { + key: self.sanitize_for_serialization(val) for key, val in obj_dict.items() + } + + def deserialize( + self, response_text: str, response_type: str, content_type: Optional[str] + ): + """Deserializes response into an object. + + :param response: RESTResponse object to be deserialized. + :param response_type: class literal for + deserialized object, or string of class name. + :param content_type: content type of response. + + :return: deserialized object. 
+ """ + + # fetch data from response object + if content_type is None: + try: + data = json.loads(response_text) + except ValueError: + data = response_text + elif re.match( + r"^application/(json|[\w!#$&.+-^_]+\+json)\s*(;|$)", + content_type, + re.IGNORECASE, + ): + if response_text == "": + data = "" + else: + data = json.loads(response_text) + elif re.match(r"^text\/[a-z.+-]+\s*(;|$)", content_type, re.IGNORECASE): + data = response_text + else: + raise ApiException( + status=0, reason="Unsupported content type: {0}".format(content_type) + ) + + return self.__deserialize(data, response_type) + + def __deserialize(self, data, klass): + """Deserializes dict, list, str into an object. + + :param data: dict, list or str. + :param klass: class literal, or string of class name. + + :return: object. + """ + if data is None: + return None + + if isinstance(klass, str): + if klass.startswith("List["): + m = re.match(r"List\[(.*)]", klass) + assert m is not None, "Malformed List type definition" + sub_kls = m.group(1) + return [self.__deserialize(sub_data, sub_kls) for sub_data in data] + + if klass.startswith("Dict["): + m = re.match(r"Dict\[([^,]*), (.*)]", klass) + assert m is not None, "Malformed Dict type definition" + sub_kls = m.group(2) + return {k: self.__deserialize(v, sub_kls) for k, v in data.items()} + + # convert str to class + if klass in self.NATIVE_TYPES_MAPPING: + klass = self.NATIVE_TYPES_MAPPING[klass] + else: + klass = getattr(together.generated.models, klass) + + if klass in self.PRIMITIVE_TYPES: + return self.__deserialize_primitive(data, klass) + elif klass == object: + return self.__deserialize_object(data) + elif klass == datetime.date: + return self.__deserialize_date(data) + elif klass == datetime.datetime: + return self.__deserialize_datetime(data) + elif klass == decimal.Decimal: + return decimal.Decimal(data) + elif issubclass(klass, Enum): + return self.__deserialize_enum(data, klass) + else: + return self.__deserialize_model(data, klass) 
+ + def parameters_to_tuples(self, params, collection_formats): + """Get parameters as list of tuples, formatting collections. + + :param params: Parameters as dict or list of two-tuples + :param dict collection_formats: Parameter collection formats + :return: Parameters as list of tuples, collections formatted + """ + new_params: List[Tuple[str, str]] = [] + if collection_formats is None: + collection_formats = {} + for k, v in params.items() if isinstance(params, dict) else params: + if k in collection_formats: + collection_format = collection_formats[k] + if collection_format == "multi": + new_params.extend((k, value) for value in v) + else: + if collection_format == "ssv": + delimiter = " " + elif collection_format == "tsv": + delimiter = "\t" + elif collection_format == "pipes": + delimiter = "|" + else: # csv is the default + delimiter = "," + new_params.append((k, delimiter.join(str(value) for value in v))) + else: + new_params.append((k, v)) + return new_params + + def parameters_to_url_query(self, params, collection_formats): + """Get parameters as list of tuples, formatting collections. + + :param params: Parameters as dict or list of two-tuples + :param dict collection_formats: Parameter collection formats + :return: URL query string (e.g. 
a=Hello%20World&b=123) + """ + new_params: List[Tuple[str, str]] = [] + if collection_formats is None: + collection_formats = {} + for k, v in params.items() if isinstance(params, dict) else params: + if isinstance(v, bool): + v = str(v).lower() + if isinstance(v, (int, float)): + v = str(v) + if isinstance(v, dict): + v = json.dumps(v) + + if k in collection_formats: + collection_format = collection_formats[k] + if collection_format == "multi": + new_params.extend((k, quote(str(value))) for value in v) + else: + if collection_format == "ssv": + delimiter = " " + elif collection_format == "tsv": + delimiter = "\t" + elif collection_format == "pipes": + delimiter = "|" + else: # csv is the default + delimiter = "," + new_params.append( + (k, delimiter.join(quote(str(value)) for value in v)) + ) + else: + new_params.append((k, quote(str(v)))) + + return "&".join(["=".join(map(str, item)) for item in new_params]) + + def files_parameters( + self, + files: Dict[str, Union[str, bytes, List[str], List[bytes], Tuple[str, bytes]]], + ): + """Builds form parameters. + + :param files: File parameters. + :return: Form parameters with files. + """ + params = [] + for k, v in files.items(): + if isinstance(v, str): + with open(v, "rb") as f: + filename = os.path.basename(f.name) + filedata = f.read() + elif isinstance(v, bytes): + filename = k + filedata = v + elif isinstance(v, tuple): + filename, filedata = v + elif isinstance(v, list): + for file_param in v: + params.extend(self.files_parameters({k: file_param})) + continue + else: + raise ValueError("Unsupported file value") + mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream" + params.append(tuple([k, tuple([filename, filedata, mimetype])])) + return params + + def select_header_accept(self, accepts: List[str]) -> Optional[str]: + """Returns `Accept` based on an array of accepts provided. + + :param accepts: List of headers. + :return: Accept (e.g. application/json). 
+ """ + if not accepts: + return None + + for accept in accepts: + if re.search("json", accept, re.IGNORECASE): + return accept + + return accepts[0] + + def select_header_content_type(self, content_types): + """Returns `Content-Type` based on an array of content_types provided. + + :param content_types: List of content-types. + :return: Content-Type (e.g. application/json). + """ + if not content_types: + return None + + for content_type in content_types: + if re.search("json", content_type, re.IGNORECASE): + return content_type + + return content_types[0] + + def update_params_for_auth( + self, + headers, + queries, + auth_settings, + resource_path, + method, + body, + request_auth=None, + ) -> None: + """Updates header and query params based on authentication setting. + + :param headers: Header parameters dict to be updated. + :param queries: Query parameters tuple list to be updated. + :param auth_settings: Authentication setting identifiers list. + :resource_path: A string representation of the HTTP request resource path. + :method: A string representation of the HTTP request method. + :body: A object representing the body of the HTTP request. + The object type is the return value of sanitize_for_serialization(). + :param request_auth: if set, the provided settings will + override the token in the configuration. + """ + if not auth_settings: + return + + if request_auth: + self._apply_auth_params( + headers, queries, resource_path, method, body, request_auth + ) + else: + for auth in auth_settings: + auth_setting = self.configuration.auth_settings().get(auth) + if auth_setting: + self._apply_auth_params( + headers, queries, resource_path, method, body, auth_setting + ) + + def _apply_auth_params( + self, headers, queries, resource_path, method, body, auth_setting + ) -> None: + """Updates the request parameters based on a single auth_setting + + :param headers: Header parameters dict to be updated. + :param queries: Query parameters tuple list to be updated. 
+ :resource_path: A string representation of the HTTP request resource path. + :method: A string representation of the HTTP request method. + :body: A object representing the body of the HTTP request. + The object type is the return value of sanitize_for_serialization(). + :param auth_setting: auth settings for the endpoint + """ + if auth_setting["in"] == "cookie": + headers["Cookie"] = auth_setting["value"] + elif auth_setting["in"] == "header": + if auth_setting["type"] != "http-signature": + headers[auth_setting["key"]] = auth_setting["value"] + elif auth_setting["in"] == "query": + queries.append((auth_setting["key"], auth_setting["value"])) + else: + raise ApiValueError("Authentication token must be in `query` or `header`") + + def __deserialize_file(self, response): + """Deserializes body to file + + Saves response body into a file in a temporary folder, + using the filename from the `Content-Disposition` header if provided. + + handle file downloading + save response body into a tmp file and return the instance + + :param response: RESTResponse. + :return: file path. + """ + fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) + os.close(fd) + os.remove(path) + + content_disposition = response.getheader("Content-Disposition") + if content_disposition: + m = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition) + assert m is not None, "Unexpected 'content-disposition' header value" + filename = m.group(1) + path = os.path.join(os.path.dirname(path), filename) + + with open(path, "wb") as f: + f.write(response.data) + + return path + + def __deserialize_primitive(self, data, klass): + """Deserializes string to primitive type. + + :param data: str. + :param klass: class literal. + + :return: int, long, float, str, bool. + """ + try: + return klass(data) + except UnicodeEncodeError: + return str(data) + except TypeError: + return data + + def __deserialize_object(self, value): + """Return an original value. + + :return: object. 
+ """ + return value + + def __deserialize_date(self, string): + """Deserializes string to date. + + :param string: str. + :return: date. + """ + try: + return parse(string).date() + except ImportError: + return string + except ValueError: + raise rest.ApiException( + status=0, reason="Failed to parse `{0}` as date object".format(string) + ) + + def __deserialize_datetime(self, string): + """Deserializes string to datetime. + + The string should be in iso8601 datetime format. + + :param string: str. + :return: datetime. + """ + try: + return parse(string) + except ImportError: + return string + except ValueError: + raise rest.ApiException( + status=0, + reason=("Failed to parse `{0}` as datetime object".format(string)), + ) + + def __deserialize_enum(self, data, klass): + """Deserializes primitive type to enum. + + :param data: primitive type. + :param klass: class literal. + :return: enum value. + """ + try: + return klass(data) + except ValueError: + raise rest.ApiException( + status=0, reason=("Failed to parse `{0}` as `{1}`".format(data, klass)) + ) + + def __deserialize_model(self, data, klass): + """Deserializes list or dict to model. + + :param data: dict, list. + :param klass: class literal. + :return: model object. 
+        """
+
+        return klass.from_dict(data)
diff --git a/src/together/generated/api_response.py b/src/together/generated/api_response.py
new file mode 100644
index 00000000..1ce13729
--- /dev/null
+++ b/src/together/generated/api_response.py
@@ -0,0 +1,20 @@
+"""API response object: typed wrapper around a single HTTP response."""
+
+from __future__ import annotations
+from typing import Optional, Generic, Mapping, TypeVar
+from pydantic import Field, StrictInt, StrictBytes, BaseModel
+
+T = TypeVar("T")
+
+
+class ApiResponse(BaseModel, Generic[T]):
+    """
+    API response object: status code, headers, raw body and deserialized data (T)
+    """
+
+    status_code: StrictInt = Field(description="HTTP status code")
+    headers: Optional[Mapping[str, str]] = Field(None, description="HTTP headers")
+    data: T = Field(description="Deserialized data given the data type")
+    raw_data: StrictBytes = Field(description="Raw data (HTTP response body)")
+
+    model_config = {"arbitrary_types_allowed": True}
diff --git a/src/together/generated/configuration.py b/src/together/generated/configuration.py
new file mode 100644
index 00000000..603014b0
--- /dev/null
+++ b/src/together/generated/configuration.py
@@ -0,0 +1,583 @@
+# coding: utf-8
+
+"""
+    Together APIs
+
+    The Together REST API. Please see https://docs.together.ai for more details.
+
+    The version of the OpenAPI document: 2.0.0
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501 + + +import copy +import http.client as httplib +import logging +from logging import FileHandler +import sys +from typing import Any, ClassVar, Dict, List, Literal, Optional, TypedDict +from typing_extensions import NotRequired, Self + +import urllib3 + + +JSON_SCHEMA_VALIDATION_KEYWORDS = { + "multipleOf", + "maximum", + "exclusiveMaximum", + "minimum", + "exclusiveMinimum", + "maxLength", + "minLength", + "pattern", + "maxItems", + "minItems", +} + +ServerVariablesT = Dict[str, str] + +GenericAuthSetting = TypedDict( + "GenericAuthSetting", + { + "type": str, + "in": str, + "key": str, + "value": str, + }, +) + + +OAuth2AuthSetting = TypedDict( + "OAuth2AuthSetting", + { + "type": Literal["oauth2"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +APIKeyAuthSetting = TypedDict( + "APIKeyAuthSetting", + { + "type": Literal["api_key"], + "in": str, + "key": str, + "value": Optional[str], + }, +) + + +BasicAuthSetting = TypedDict( + "BasicAuthSetting", + { + "type": Literal["basic"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": Optional[str], + }, +) + + +BearerFormatAuthSetting = TypedDict( + "BearerFormatAuthSetting", + { + "type": Literal["bearer"], + "in": Literal["header"], + "format": Literal["JWT"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +BearerAuthSetting = TypedDict( + "BearerAuthSetting", + { + "type": Literal["bearer"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": str, + }, +) + + +HTTPSignatureAuthSetting = TypedDict( + "HTTPSignatureAuthSetting", + { + "type": Literal["http-signature"], + "in": Literal["header"], + "key": Literal["Authorization"], + "value": None, + }, +) + + +AuthSettings = TypedDict( + "AuthSettings", + { + "bearerAuth": BearerAuthSetting, + }, + total=False, +) + + +class HostSettingVariable(TypedDict): + description: str + default_value: str + enum_values: List[str] + + +class 
HostSetting(TypedDict): + url: str + description: str + variables: NotRequired[Dict[str, HostSettingVariable]] + + +class Configuration: + """This class contains various settings of the API client. + + :param host: Base url. + :param ignore_operation_servers + Boolean to ignore operation servers for the API client. + Config will use `host` as the base url regardless of the operation servers. + :param api_key: Dict to store API key(s). + Each entry in the dict specifies an API key. + The dict key is the name of the security scheme in the OAS specification. + The dict value is the API key secret. + :param api_key_prefix: Dict to store API prefix (e.g. Bearer). + The dict key is the name of the security scheme in the OAS specification. + The dict value is an API key prefix when generating the auth data. + :param username: Username for HTTP basic authentication. + :param password: Password for HTTP basic authentication. + :param access_token: Access token. + :param server_index: Index to servers configuration. + :param server_variables: Mapping with string values to replace variables in + templated server configuration. The validation of enums is performed for + variables with defined enum values before. + :param server_operation_index: Mapping from operation ID to an index to server + configuration. + :param server_operation_variables: Mapping from operation ID to a mapping with + string values to replace variables in templated server configuration. + The validation of enums is performed for variables with defined enum + values before. + :param ssl_ca_cert: str - the path to a file of concatenated CA certificates + in PEM format. + :param retries: Number of retries for API requests. 
+ + :Example: + """ + + _default: ClassVar[Optional[Self]] = None + + def __init__( + self, + host: Optional[str] = None, + api_key: Optional[Dict[str, str]] = None, + api_key_prefix: Optional[Dict[str, str]] = None, + username: Optional[str] = None, + password: Optional[str] = None, + access_token: Optional[str] = None, + server_index: Optional[int] = None, + server_variables: Optional[ServerVariablesT] = None, + server_operation_index: Optional[Dict[int, int]] = None, + server_operation_variables: Optional[Dict[int, ServerVariablesT]] = None, + ignore_operation_servers: bool = False, + ssl_ca_cert: Optional[str] = None, + retries: Optional[int] = None, + *, + debug: Optional[bool] = None, + ) -> None: + """Constructor""" + self._base_path = "https://api.together.xyz/v1" if host is None else host + """Default Base url + """ + self.server_index = 0 if server_index is None and host is None else server_index + self.server_operation_index = server_operation_index or {} + """Default server index + """ + self.server_variables = server_variables or {} + self.server_operation_variables = server_operation_variables or {} + """Default server variables + """ + self.ignore_operation_servers = ignore_operation_servers + """Ignore operation servers + """ + self.temp_folder_path = None + """Temp file folder for downloading files + """ + # Authentication Settings + self.api_key = {} + if api_key: + self.api_key = api_key + """dict to store API key(s) + """ + self.api_key_prefix = {} + if api_key_prefix: + self.api_key_prefix = api_key_prefix + """dict to store API prefix (e.g. 
Bearer) + """ + self.refresh_api_key_hook = None + """function hook to refresh API key if expired + """ + self.username = username + """Username for HTTP basic authentication + """ + self.password = password + """Password for HTTP basic authentication + """ + self.access_token = access_token + """Access token + """ + self.logger = {} + """Logging Settings + """ + self.logger["package_logger"] = logging.getLogger("together.generated") + self.logger["urllib3_logger"] = logging.getLogger("urllib3") + self.logger_format = "%(asctime)s %(levelname)s %(message)s" + """Log format + """ + self.logger_stream_handler = None + """Log stream handler + """ + self.logger_file_handler: Optional[FileHandler] = None + """Log file handler + """ + self.logger_file = None + """Debug file location + """ + if debug is not None: + self.debug = debug + else: + self.__debug = False + """Debug switch + """ + + self.verify_ssl = True + """SSL/TLS verification + Set this to false to skip verifying SSL certificate when calling API + from https server. + """ + self.ssl_ca_cert = ssl_ca_cert + """Set this to customize the certificate file to verify the peer. + """ + self.cert_file = None + """client certificate file + """ + self.key_file = None + """client key file + """ + self.assert_hostname = None + """Set this to True/False to enable/disable SSL hostname verification. + """ + self.tls_server_name = None + """SSL/TLS Server Name Indication (SNI) + Set this to the SNI value expected by the server. + """ + + self.connection_pool_maxsize = 100 + """This value is passed to the aiohttp to limit simultaneous connections. + Default values is 100, None means no-limit. 
+ """ + + self.proxy: Optional[str] = None + """Proxy URL + """ + self.proxy_headers = None + """Proxy headers + """ + self.safe_chars_for_path_param = "" + """Safe chars for path_param + """ + self.retries = retries + """Adding retries to override urllib3 default value 3 + """ + # Enable client side validation + self.client_side_validation = True + + self.socket_options = None + """Options to pass down to the underlying urllib3 socket + """ + + self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z" + """datetime format + """ + + self.date_format = "%Y-%m-%d" + """date format + """ + + def __deepcopy__(self, memo: Dict[int, Any]) -> Self: + cls = self.__class__ + result = cls.__new__(cls) + memo[id(self)] = result + for k, v in self.__dict__.items(): + if k not in ("logger", "logger_file_handler"): + setattr(result, k, copy.deepcopy(v, memo)) + # shallow copy of loggers + result.logger = copy.copy(self.logger) + # use setters to configure loggers + result.logger_file = self.logger_file + result.debug = self.debug + return result + + def __setattr__(self, name: str, value: Any) -> None: + object.__setattr__(self, name, value) + + @classmethod + def set_default(cls, default: Optional[Self]) -> None: + """Set default instance of configuration. + + It stores default configuration, which can be + returned by get_default_copy method. + + :param default: object of Configuration + """ + cls._default = default + + @classmethod + def get_default_copy(cls) -> Self: + """Deprecated. Please use `get_default` instead. + + Deprecated. Please use `get_default` instead. + + :return: The configuration object. + """ + return cls.get_default() + + @classmethod + def get_default(cls) -> Self: + """Return the default configuration. + + This method returns newly created, based on default constructor, + object of Configuration class or returns a copy of default + configuration. + + :return: The configuration object. 
+ """ + if cls._default is None: + cls._default = cls() + return cls._default + + @property + def logger_file(self) -> Optional[str]: + """The logger file. + + If the logger_file is None, then add stream handler and remove file + handler. Otherwise, add file handler and remove stream handler. + + :param value: The logger_file path. + :type: str + """ + return self.__logger_file + + @logger_file.setter + def logger_file(self, value: Optional[str]) -> None: + """The logger file. + + If the logger_file is None, then add stream handler and remove file + handler. Otherwise, add file handler and remove stream handler. + + :param value: The logger_file path. + :type: str + """ + self.__logger_file = value + if self.__logger_file: + # If set logging file, + # then add file handler and remove stream handler. + self.logger_file_handler = logging.FileHandler(self.__logger_file) + self.logger_file_handler.setFormatter(self.logger_formatter) + for _, logger in self.logger.items(): + logger.addHandler(self.logger_file_handler) + + @property + def debug(self) -> bool: + """Debug status + + :param value: The debug status, True or False. + :type: bool + """ + return self.__debug + + @debug.setter + def debug(self, value: bool) -> None: + """Debug status + + :param value: The debug status, True or False. + :type: bool + """ + self.__debug = value + if self.__debug: + # if debug status is True, turn on debug logging + for _, logger in self.logger.items(): + logger.setLevel(logging.DEBUG) + # turn on httplib debug + httplib.HTTPConnection.debuglevel = 1 + else: + # if debug status is False, turn off debug logging, + # setting log level to default `logging.WARNING` + for _, logger in self.logger.items(): + logger.setLevel(logging.WARNING) + # turn off httplib debug + httplib.HTTPConnection.debuglevel = 0 + + @property + def logger_format(self) -> str: + """The logger format. + + The logger_formatter will be updated when sets logger_format. + + :param value: The format string. 
+ :type: str + """ + return self.__logger_format + + @logger_format.setter + def logger_format(self, value: str) -> None: + """The logger format. + + The logger_formatter will be updated when sets logger_format. + + :param value: The format string. + :type: str + """ + self.__logger_format = value + self.logger_formatter = logging.Formatter(self.__logger_format) + + def get_api_key_with_prefix( + self, identifier: str, alias: Optional[str] = None + ) -> Optional[str]: + """Gets API key (with prefix if set). + + :param identifier: The identifier of apiKey. + :param alias: The alternative identifier of apiKey. + :return: The token for api key authentication. + """ + if self.refresh_api_key_hook is not None: + self.refresh_api_key_hook(self) + key = self.api_key.get( + identifier, self.api_key.get(alias) if alias is not None else None + ) + if key: + prefix = self.api_key_prefix.get(identifier) + if prefix: + return "%s %s" % (prefix, key) + else: + return key + + return None + + def get_basic_auth_token(self) -> Optional[str]: + """Gets HTTP basic authentication header (string). + + :return: The token for basic HTTP authentication. + """ + username = "" + if self.username is not None: + username = self.username + password = "" + if self.password is not None: + password = self.password + return urllib3.util.make_headers(basic_auth=username + ":" + password).get( + "authorization" + ) + + def auth_settings(self) -> AuthSettings: + """Gets Auth Settings dict for api client. + + :return: The Auth Settings information dict. + """ + auth: AuthSettings = {} + if self.access_token is not None: + auth["bearerAuth"] = { + "type": "bearer", + "in": "header", + "key": "Authorization", + "value": "Bearer " + self.access_token, + } + return auth + + def to_debug_report(self) -> str: + """Gets the essential information for debugging. + + :return: The report for debugging. 
+ """ + return ( + "Python SDK Debug Report:\n" + "OS: {env}\n" + "Python Version: {pyversion}\n" + "Version of the API: 2.0.0\n" + "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) + ) + + def get_host_settings(self) -> List[HostSetting]: + """Gets an array of host settings + + :return: An array of host settings + """ + return [ + { + "url": "https://api.together.xyz/v1", + "description": "No description provided", + } + ] + + def get_host_from_settings( + self, + index: Optional[int], + variables: Optional[ServerVariablesT] = None, + servers: Optional[List[HostSetting]] = None, + ) -> str: + """Gets host URL based on the index and variables + :param index: array index of the host settings + :param variables: hash of variable and the corresponding value + :param servers: an array of host settings or None + :return: URL based on host settings + """ + if index is None: + return self._base_path + + variables = {} if variables is None else variables + servers = self.get_host_settings() if servers is None else servers + + try: + server = servers[index] + except IndexError: + raise ValueError( + "Invalid index {0} when selecting the host settings. " + "Must be less than {1}".format(index, len(servers)) + ) + + url = server["url"] + + # go through variables and replace placeholders + for variable_name, variable in server.get("variables", {}).items(): + used_value = variables.get(variable_name, variable["default_value"]) + + if "enum_values" in variable and used_value not in variable["enum_values"]: + raise ValueError( + "The variable `{0}` in the host URL has invalid value " + "{1}. 
Must be {2}.".format( + variable_name, variables[variable_name], variable["enum_values"] + ) + ) + + url = url.replace("{" + variable_name + "}", used_value) + + return url + + @property + def host(self) -> str: + """Return generated host.""" + return self.get_host_from_settings( + self.server_index, variables=self.server_variables + ) + + @host.setter + def host(self, value: str) -> None: + """Fix base path.""" + self._base_path = value + self.server_index = None diff --git a/src/together/generated/docs/AudioApi.md b/src/together/generated/docs/AudioApi.md new file mode 100644 index 00000000..adf6ebb4 --- /dev/null +++ b/src/together/generated/docs/AudioApi.md @@ -0,0 +1,88 @@ +# together.generated.AudioApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**audio_speech**](AudioApi.md#audio_speech) | **POST** /audio/speech | Create audio generation request + + +# **audio_speech** +> bytearray audio_speech(audio_speech_request=audio_speech_request) + +Create audio generation request + +Generate audio from input text + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.AudioApi(api_client) + audio_speech_request = together.generated.AudioSpeechRequest() # AudioSpeechRequest | (optional) + + try: + # Create audio generation request + api_response = await api_instance.audio_speech(audio_speech_request=audio_speech_request) + print("The response of AudioApi->audio_speech:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AudioApi->audio_speech: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **audio_speech_request** | [**AudioSpeechRequest**](AudioSpeechRequest.md)| | [optional] + +### Return type + +**bytearray** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/octet-stream, audio/wav, audio/mpeg, text/event-stream, application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | BadRequest | - | +**429** | RateLimit | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequest.md b/src/together/generated/docs/AudioSpeechRequest.md new file mode 100644 index 00000000..23ef3ade --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequest.md @@ -0,0 +1,34 @@ +# AudioSpeechRequest + + +## Properties + +Name | Type | Description | Notes +------------ | 
------------- | ------------- | ------------- +**model** | [**AudioSpeechRequestModel**](AudioSpeechRequestModel.md) | | +**input** | **str** | Input text to generate the audio for | +**voice** | [**AudioSpeechRequestVoice**](AudioSpeechRequestVoice.md) | | +**response_format** | **str** | The format of audio output | [optional] [default to 'wav'] +**language** | **str** | Language of input text | [optional] [default to 'en'] +**response_encoding** | **str** | Audio encoding of response | [optional] [default to 'pcm_f32le'] +**sample_rate** | **float** | Sampling rate to use for the output audio | [optional] [default to 44100] +**stream** | **bool** | If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. If false, return the encoded audio as octet stream | [optional] [default to False] + +## Example + +```python +from together.generated.models.audio_speech_request import AudioSpeechRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequest from a JSON string +audio_speech_request_instance = AudioSpeechRequest.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequest.to_json()) + +# convert the object into a dict +audio_speech_request_dict = audio_speech_request_instance.to_dict() +# create an instance of AudioSpeechRequest from a dict +audio_speech_request_from_dict = AudioSpeechRequest.from_dict(audio_speech_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestModel.md b/src/together/generated/docs/AudioSpeechRequestModel.md new file mode 100644 index 00000000..41febc9a --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequestModel.md @@ -0,0 +1,27 @@ +# AudioSpeechRequestModel + +The name of the 
model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequestModel from a JSON string +audio_speech_request_model_instance = AudioSpeechRequestModel.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequestModel.to_json()) + +# convert the object into a dict +audio_speech_request_model_dict = audio_speech_request_model_instance.to_dict() +# create an instance of AudioSpeechRequestModel from a dict +audio_speech_request_model_from_dict = AudioSpeechRequestModel.from_dict(audio_speech_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechRequestVoice.md b/src/together/generated/docs/AudioSpeechRequestVoice.md new file mode 100644 index 00000000..9ad16586 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechRequestVoice.md @@ -0,0 +1,27 @@ +# AudioSpeechRequestVoice + +The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechRequestVoice from a JSON string +audio_speech_request_voice_instance = AudioSpeechRequestVoice.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechRequestVoice.to_json()) + +# convert the object into a dict +audio_speech_request_voice_dict = audio_speech_request_voice_instance.to_dict() +# create an instance of AudioSpeechRequestVoice from a dict +audio_speech_request_voice_from_dict = AudioSpeechRequestVoice.from_dict(audio_speech_request_voice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamChunk.md b/src/together/generated/docs/AudioSpeechStreamChunk.md new file mode 100644 index 00000000..7d9f5558 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamChunk.md @@ -0,0 +1,29 @@ +# AudioSpeechStreamChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**model** | **str** | | +**b64** | **str** | base64 encoded audio stream | + +## Example + +```python +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamChunk from a JSON string +audio_speech_stream_chunk_instance = AudioSpeechStreamChunk.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamChunk.to_json()) + +# convert the object into a dict +audio_speech_stream_chunk_dict = 
audio_speech_stream_chunk_instance.to_dict() +# create an instance of AudioSpeechStreamChunk from a dict +audio_speech_stream_chunk_from_dict = AudioSpeechStreamChunk.from_dict(audio_speech_stream_chunk_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamEvent.md b/src/together/generated/docs/AudioSpeechStreamEvent.md new file mode 100644 index 00000000..9c2d9f7a --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamEvent.md @@ -0,0 +1,27 @@ +# AudioSpeechStreamEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**AudioSpeechStreamChunk**](AudioSpeechStreamChunk.md) | | + +## Example + +```python +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamEvent from a JSON string +audio_speech_stream_event_instance = AudioSpeechStreamEvent.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamEvent.to_json()) + +# convert the object into a dict +audio_speech_stream_event_dict = audio_speech_stream_event_instance.to_dict() +# create an instance of AudioSpeechStreamEvent from a dict +audio_speech_stream_event_from_dict = AudioSpeechStreamEvent.from_dict(audio_speech_stream_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/AudioSpeechStreamResponse.md b/src/together/generated/docs/AudioSpeechStreamResponse.md new file mode 100644 index 00000000..eda7c0b0 --- /dev/null +++ b/src/together/generated/docs/AudioSpeechStreamResponse.md @@ -0,0 +1,27 @@ +# 
AudioSpeechStreamResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.audio_speech_stream_response import AudioSpeechStreamResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of AudioSpeechStreamResponse from a JSON string +audio_speech_stream_response_instance = AudioSpeechStreamResponse.from_json(json) +# print the JSON string representation of the object +print(AudioSpeechStreamResponse.to_json()) + +# convert the object into a dict +audio_speech_stream_response_dict = audio_speech_stream_response_instance.to_dict() +# create an instance of AudioSpeechStreamResponse from a dict +audio_speech_stream_response_from_dict = AudioSpeechStreamResponse.from_dict(audio_speech_stream_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Autoscaling.md b/src/together/generated/docs/Autoscaling.md new file mode 100644 index 00000000..b1ee0b95 --- /dev/null +++ b/src/together/generated/docs/Autoscaling.md @@ -0,0 +1,29 @@ +# Autoscaling + +Configuration for automatic scaling of replicas based on demand. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**min_replicas** | **int** | The minimum number of replicas to maintain, even when there is no load | +**max_replicas** | **int** | The maximum number of replicas to scale up to under load | + +## Example + +```python +from together.generated.models.autoscaling import Autoscaling + +# TODO update the JSON string below +json = "{}" +# create an instance of Autoscaling from a JSON string +autoscaling_instance = Autoscaling.from_json(json) +# print the JSON string representation of the object +print(Autoscaling.to_json()) + +# convert the object into a dict +autoscaling_dict = autoscaling_instance.to_dict() +# create an instance of Autoscaling from a dict +autoscaling_from_dict = Autoscaling.from_dict(autoscaling_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatApi.md b/src/together/generated/docs/ChatApi.md new file mode 100644 index 00000000..dd0e179d --- /dev/null +++ b/src/together/generated/docs/ChatApi.md @@ -0,0 +1,93 @@ +# together.generated.ChatApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**chat_completions**](ChatApi.md#chat_completions) | **POST** /chat/completions | Create chat completion + + +# **chat_completions** +> ChatCompletionResponse chat_completions(chat_completion_request=chat_completion_request) + +Create chat completion + +Query a chat model. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ChatApi(api_client) + chat_completion_request = together.generated.ChatCompletionRequest() # ChatCompletionRequest | (optional) + + try: + # Create chat completion + api_response = await api_instance.chat_completions(chat_completion_request=chat_completion_request) + print("The response of ChatApi->chat_completions:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ChatApi->chat_completions: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **chat_completion_request** | [**ChatCompletionRequest**](ChatCompletionRequest.md)| | [optional] + +### Return type + +[**ChatCompletionResponse**](ChatCompletionResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### 
HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/event-stream + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionAssistantMessageParam.md b/src/together/generated/docs/ChatCompletionAssistantMessageParam.md new file mode 100644 index 00000000..5281fe5c --- /dev/null +++ b/src/together/generated/docs/ChatCompletionAssistantMessageParam.md @@ -0,0 +1,31 @@ +# ChatCompletionAssistantMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | [optional] +**role** | **str** | | +**name** | **str** | | [optional] +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionAssistantMessageParam from a JSON string +chat_completion_assistant_message_param_instance = ChatCompletionAssistantMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionAssistantMessageParam.to_json()) + +# convert the object into a dict +chat_completion_assistant_message_param_dict = chat_completion_assistant_message_param_instance.to_dict() +# create an 
instance of ChatCompletionAssistantMessageParam from a dict +chat_completion_assistant_message_param_from_dict = ChatCompletionAssistantMessageParam.from_dict(chat_completion_assistant_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoice.md b/src/together/generated/docs/ChatCompletionChoice.md new file mode 100644 index 00000000..b75becc7 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoice.md @@ -0,0 +1,30 @@ +# ChatCompletionChoice + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**finish_reason** | [**FinishReason**](FinishReason.md) | | +**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] +**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | + +## Example + +```python +from together.generated.models.chat_completion_choice import ChatCompletionChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoice from a JSON string +chat_completion_choice_instance = ChatCompletionChoice.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoice.to_json()) + +# convert the object into a dict +chat_completion_choice_dict = chat_completion_choice_instance.to_dict() +# create an instance of ChatCompletionChoice from a dict +chat_completion_choice_from_dict = ChatCompletionChoice.from_dict(chat_completion_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDelta.md b/src/together/generated/docs/ChatCompletionChoiceDelta.md new file mode 100644 index 00000000..865b8090 
--- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoiceDelta.md @@ -0,0 +1,31 @@ +# ChatCompletionChoiceDelta + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_id** | **int** | | [optional] +**role** | **str** | | +**content** | **str** | | [optional] +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionChoiceDeltaFunctionCall**](ChatCompletionChoiceDeltaFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choice_delta import ChatCompletionChoiceDelta + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoiceDelta from a JSON string +chat_completion_choice_delta_instance = ChatCompletionChoiceDelta.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoiceDelta.to_json()) + +# convert the object into a dict +chat_completion_choice_delta_dict = chat_completion_choice_delta_instance.to_dict() +# create an instance of ChatCompletionChoiceDelta from a dict +chat_completion_choice_delta_from_dict = ChatCompletionChoiceDelta.from_dict(chat_completion_choice_delta_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md b/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md new file mode 100644 index 00000000..e6e861f0 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoiceDeltaFunctionCall.md @@ -0,0 +1,28 @@ +# ChatCompletionChoiceDeltaFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**arguments** | **str** | | +**name** | **str** | | + +## Example + +```python +from 
together.generated.models.chat_completion_choice_delta_function_call import ChatCompletionChoiceDeltaFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string +chat_completion_choice_delta_function_call_instance = ChatCompletionChoiceDeltaFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoiceDeltaFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_choice_delta_function_call_dict = chat_completion_choice_delta_function_call_instance.to_dict() +# create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict +chat_completion_choice_delta_function_call_from_dict = ChatCompletionChoiceDeltaFunctionCall.from_dict(chat_completion_choice_delta_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInner.md b/src/together/generated/docs/ChatCompletionChoicesDataInner.md new file mode 100644 index 00000000..56fe92b1 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoicesDataInner.md @@ -0,0 +1,32 @@ +# ChatCompletionChoicesDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**index** | **int** | | [optional] +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] +**message** | [**ChatCompletionMessage**](ChatCompletionMessage.md) | | [optional] +**logprobs** | [**ChatCompletionChoicesDataInnerLogprobs**](ChatCompletionChoicesDataInnerLogprobs.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choices_data_inner import ChatCompletionChoicesDataInner + +# TODO update the JSON string below 
+json = "{}" +# create an instance of ChatCompletionChoicesDataInner from a JSON string +chat_completion_choices_data_inner_instance = ChatCompletionChoicesDataInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChoicesDataInner.to_json()) + +# convert the object into a dict +chat_completion_choices_data_inner_dict = chat_completion_choices_data_inner_instance.to_dict() +# create an instance of ChatCompletionChoicesDataInner from a dict +chat_completion_choices_data_inner_from_dict = ChatCompletionChoicesDataInner.from_dict(chat_completion_choices_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md b/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md new file mode 100644 index 00000000..72320aab --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChoicesDataInnerLogprobs.md @@ -0,0 +1,29 @@ +# ChatCompletionChoicesDataInnerLogprobs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] +**tokens** | **List[str]** | List of token strings | [optional] +**token_logprobs** | **List[float]** | List of token log probabilities | [optional] + +## Example + +```python +from together.generated.models.chat_completion_choices_data_inner_logprobs import ChatCompletionChoicesDataInnerLogprobs + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string +chat_completion_choices_data_inner_logprobs_instance = ChatCompletionChoicesDataInnerLogprobs.from_json(json) +# print the JSON string representation of the object 
+print(ChatCompletionChoicesDataInnerLogprobs.to_json()) + +# convert the object into a dict +chat_completion_choices_data_inner_logprobs_dict = chat_completion_choices_data_inner_logprobs_instance.to_dict() +# create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict +chat_completion_choices_data_inner_logprobs_from_dict = ChatCompletionChoicesDataInnerLogprobs.from_dict(chat_completion_choices_data_inner_logprobs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunk.md b/src/together/generated/docs/ChatCompletionChunk.md new file mode 100644 index 00000000..d42484b9 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChunk.md @@ -0,0 +1,33 @@ +# ChatCompletionChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created** | **int** | | +**system_fingerprint** | **str** | | [optional] +**model** | **str** | | +**choices** | [**List[ChatCompletionChunkChoicesInner]**](ChatCompletionChunkChoicesInner.md) | | +**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_chunk import ChatCompletionChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChunk from a JSON string +chat_completion_chunk_instance = ChatCompletionChunk.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChunk.to_json()) + +# convert the object into a dict +chat_completion_chunk_dict = chat_completion_chunk_instance.to_dict() +# create an instance of ChatCompletionChunk from a dict +chat_completion_chunk_from_dict = ChatCompletionChunk.from_dict(chat_completion_chunk_dict) +``` +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionChunkChoicesInner.md b/src/together/generated/docs/ChatCompletionChunkChoicesInner.md new file mode 100644 index 00000000..b33bff02 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionChunkChoicesInner.md @@ -0,0 +1,31 @@ +# ChatCompletionChunkChoicesInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**finish_reason** | [**FinishReason**](FinishReason.md) | | +**logprobs** | **float** | | [optional] +**seed** | **int** | | [optional] +**delta** | [**ChatCompletionChoiceDelta**](ChatCompletionChoiceDelta.md) | | + +## Example + +```python +from together.generated.models.chat_completion_chunk_choices_inner import ChatCompletionChunkChoicesInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionChunkChoicesInner from a JSON string +chat_completion_chunk_choices_inner_instance = ChatCompletionChunkChoicesInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionChunkChoicesInner.to_json()) + +# convert the object into a dict +chat_completion_chunk_choices_inner_dict = chat_completion_chunk_choices_inner_instance.to_dict() +# create an instance of ChatCompletionChunkChoicesInner from a dict +chat_completion_chunk_choices_inner_from_dict = ChatCompletionChunkChoicesInner.from_dict(chat_completion_chunk_choices_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionEvent.md b/src/together/generated/docs/ChatCompletionEvent.md new file mode 100644 index 00000000..49c1046a --- /dev/null +++ 
b/src/together/generated/docs/ChatCompletionEvent.md @@ -0,0 +1,27 @@ +# ChatCompletionEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**ChatCompletionChunk**](ChatCompletionChunk.md) | | + +## Example + +```python +from together.generated.models.chat_completion_event import ChatCompletionEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionEvent from a JSON string +chat_completion_event_instance = ChatCompletionEvent.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionEvent.to_json()) + +# convert the object into a dict +chat_completion_event_dict = chat_completion_event_instance.to_dict() +# create an instance of ChatCompletionEvent from a dict +chat_completion_event_from_dict = ChatCompletionEvent.from_dict(chat_completion_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionFunctionMessageParam.md b/src/together/generated/docs/ChatCompletionFunctionMessageParam.md new file mode 100644 index 00000000..1f89e299 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionFunctionMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionFunctionMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | | +**content** | **str** | | +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_function_message_param import ChatCompletionFunctionMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionFunctionMessageParam from a JSON string +chat_completion_function_message_param_instance = ChatCompletionFunctionMessageParam.from_json(json) +# print 
the JSON string representation of the object +print(ChatCompletionFunctionMessageParam.to_json()) + +# convert the object into a dict +chat_completion_function_message_param_dict = chat_completion_function_message_param_instance.to_dict() +# create an instance of ChatCompletionFunctionMessageParam from a dict +chat_completion_function_message_param_from_dict = ChatCompletionFunctionMessageParam.from_dict(chat_completion_function_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessage.md b/src/together/generated/docs/ChatCompletionMessage.md new file mode 100644 index 00000000..d3d814a1 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessage.md @@ -0,0 +1,30 @@ +# ChatCompletionMessage + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_message import ChatCompletionMessage + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessage from a JSON string +chat_completion_message_instance = ChatCompletionMessage.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessage.to_json()) + +# convert the object into a dict +chat_completion_message_dict = chat_completion_message_instance.to_dict() +# create an instance of ChatCompletionMessage from a dict +chat_completion_message_from_dict = ChatCompletionMessage.from_dict(chat_completion_message_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back 
to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageFunctionCall.md b/src/together/generated/docs/ChatCompletionMessageFunctionCall.md new file mode 100644 index 00000000..177d9d1d --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessageFunctionCall.md @@ -0,0 +1,28 @@ +# ChatCompletionMessageFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**arguments** | **str** | | +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_message_function_call import ChatCompletionMessageFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessageFunctionCall from a JSON string +chat_completion_message_function_call_instance = ChatCompletionMessageFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessageFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_message_function_call_dict = chat_completion_message_function_call_instance.to_dict() +# create an instance of ChatCompletionMessageFunctionCall from a dict +chat_completion_message_function_call_from_dict = ChatCompletionMessageFunctionCall.from_dict(chat_completion_message_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionMessageParam.md b/src/together/generated/docs/ChatCompletionMessageParam.md new file mode 100644 index 00000000..c8e6136d --- /dev/null +++ b/src/together/generated/docs/ChatCompletionMessageParam.md @@ -0,0 +1,32 @@ +# ChatCompletionMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | 
------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | +**tool_calls** | [**List[ToolChoice]**](ToolChoice.md) | | [optional] +**function_call** | [**ChatCompletionMessageFunctionCall**](ChatCompletionMessageFunctionCall.md) | | [optional] +**tool_call_id** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_message_param import ChatCompletionMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionMessageParam from a JSON string +chat_completion_message_param_instance = ChatCompletionMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionMessageParam.to_json()) + +# convert the object into a dict +chat_completion_message_param_dict = chat_completion_message_param_instance.to_dict() +# create an instance of ChatCompletionMessageParam from a dict +chat_completion_message_param_from_dict = ChatCompletionMessageParam.from_dict(chat_completion_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequest.md b/src/together/generated/docs/ChatCompletionRequest.md new file mode 100644 index 00000000..f0200b3f --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequest.md @@ -0,0 +1,49 @@ +# ChatCompletionRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**messages** | [**List[ChatCompletionRequestMessagesInner]**](ChatCompletionRequestMessagesInner.md) | A list of messages comprising the conversation so far. | +**model** | [**ChatCompletionRequestModel**](ChatCompletionRequestModel.md) | | +**max_tokens** | **int** | The maximum number of tokens to generate. 
| [optional] +**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] +**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] +**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. | [optional] +**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] +**context_length_exceeded_behavior** | **str** | Defines the behavior of the API when max_tokens exceeds the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', overrides the max_tokens with maximum context length of the model. | [optional] [default to 'error'] +**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] +**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. 
The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] +**logprobs** | **int** | Determines the number of most likely tokens for which to return log probabilities at each token position. | [optional] +**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] +**n** | **int** | The number of completions to generate for each prompt. | [optional] +**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top_p and top_k. | [optional] +**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] +**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] +**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. | [optional] +**seed** | **int** | Seed value for reproducibility. | [optional] +**function_call** | [**ChatCompletionRequestFunctionCall**](ChatCompletionRequestFunctionCall.md) | | [optional] +**response_format** | [**ChatCompletionRequestResponseFormat**](ChatCompletionRequestResponseFormat.md) | | [optional] +**tools** | [**List[ToolsPart]**](ToolsPart.md) | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. | [optional] +**tool_choice** | [**ChatCompletionRequestToolChoice**](ChatCompletionRequestToolChoice.md) | | [optional] +**safety_model** | **str** | The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
| [optional] + +## Example + +```python +from together.generated.models.chat_completion_request import ChatCompletionRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequest from a JSON string +chat_completion_request_instance = ChatCompletionRequest.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequest.to_json()) + +# convert the object into a dict +chat_completion_request_dict = chat_completion_request_instance.to_dict() +# create an instance of ChatCompletionRequest from a dict +chat_completion_request_from_dict = ChatCompletionRequest.from_dict(chat_completion_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCall.md b/src/together/generated/docs/ChatCompletionRequestFunctionCall.md new file mode 100644 index 00000000..dbeddd2a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestFunctionCall.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestFunctionCall + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_request_function_call import ChatCompletionRequestFunctionCall + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestFunctionCall from a JSON string +chat_completion_request_function_call_instance = ChatCompletionRequestFunctionCall.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestFunctionCall.to_json()) + +# convert the object into a dict +chat_completion_request_function_call_dict = chat_completion_request_function_call_instance.to_dict() +# create an instance of ChatCompletionRequestFunctionCall from a 
dict +chat_completion_request_function_call_from_dict = ChatCompletionRequestFunctionCall.from_dict(chat_completion_request_function_call_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md b/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md new file mode 100644 index 00000000..bb0a34f7 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestFunctionCallOneOf.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestFunctionCallOneOf + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_request_function_call_one_of import ChatCompletionRequestFunctionCallOneOf + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string +chat_completion_request_function_call_one_of_instance = ChatCompletionRequestFunctionCallOneOf.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestFunctionCallOneOf.to_json()) + +# convert the object into a dict +chat_completion_request_function_call_one_of_dict = chat_completion_request_function_call_one_of_instance.to_dict() +# create an instance of ChatCompletionRequestFunctionCallOneOf from a dict +chat_completion_request_function_call_one_of_from_dict = ChatCompletionRequestFunctionCallOneOf.from_dict(chat_completion_request_function_call_one_of_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestMessagesInner.md 
b/src/together/generated/docs/ChatCompletionRequestMessagesInner.md new file mode 100644 index 00000000..8512ef45 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestMessagesInner.md @@ -0,0 +1,28 @@ +# ChatCompletionRequestMessagesInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | The role of the messages author. Choice between: system, user, or assistant. | +**content** | **str** | The content of the message, which can either be a simple string or a structured format. | + +## Example + +```python +from together.generated.models.chat_completion_request_messages_inner import ChatCompletionRequestMessagesInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestMessagesInner from a JSON string +chat_completion_request_messages_inner_instance = ChatCompletionRequestMessagesInner.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestMessagesInner.to_json()) + +# convert the object into a dict +chat_completion_request_messages_inner_dict = chat_completion_request_messages_inner_instance.to_dict() +# create an instance of ChatCompletionRequestMessagesInner from a dict +chat_completion_request_messages_inner_from_dict = ChatCompletionRequestMessagesInner.from_dict(chat_completion_request_messages_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestModel.md b/src/together/generated/docs/ChatCompletionRequestModel.md new file mode 100644 index 00000000..c9387ca0 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestModel.md @@ -0,0 +1,27 @@ +# ChatCompletionRequestModel + +The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.chat_completion_request_model import ChatCompletionRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestModel from a JSON string +chat_completion_request_model_instance = ChatCompletionRequestModel.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestModel.to_json()) + +# convert the object into a dict +chat_completion_request_model_dict = chat_completion_request_model_instance.to_dict() +# create an instance of ChatCompletionRequestModel from a dict +chat_completion_request_model_from_dict = ChatCompletionRequestModel.from_dict(chat_completion_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestResponseFormat.md b/src/together/generated/docs/ChatCompletionRequestResponseFormat.md new file mode 100644 index 00000000..aa94fd0a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestResponseFormat.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestResponseFormat + +An object specifying the format that the model must output. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | The type of the response format. | [optional] +**var_schema** | **Dict[str, str]** | The schema of the response format. 
| [optional] + +## Example + +```python +from together.generated.models.chat_completion_request_response_format import ChatCompletionRequestResponseFormat + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestResponseFormat from a JSON string +chat_completion_request_response_format_instance = ChatCompletionRequestResponseFormat.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestResponseFormat.to_json()) + +# convert the object into a dict +chat_completion_request_response_format_dict = chat_completion_request_response_format_instance.to_dict() +# create an instance of ChatCompletionRequestResponseFormat from a dict +chat_completion_request_response_format_from_dict = ChatCompletionRequestResponseFormat.from_dict(chat_completion_request_response_format_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionRequestToolChoice.md b/src/together/generated/docs/ChatCompletionRequestToolChoice.md new file mode 100644 index 00000000..c6a50e08 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionRequestToolChoice.md @@ -0,0 +1,31 @@ +# ChatCompletionRequestToolChoice + +Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **float** | | +**id** | **str** | | +**type** | **str** | | +**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | + +## Example + +```python +from together.generated.models.chat_completion_request_tool_choice import ChatCompletionRequestToolChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionRequestToolChoice from a JSON string +chat_completion_request_tool_choice_instance = ChatCompletionRequestToolChoice.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionRequestToolChoice.to_json()) + +# convert the object into a dict +chat_completion_request_tool_choice_dict = chat_completion_request_tool_choice_instance.to_dict() +# create an instance of ChatCompletionRequestToolChoice from a dict +chat_completion_request_tool_choice_from_dict = ChatCompletionRequestToolChoice.from_dict(chat_completion_request_tool_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionResponse.md b/src/together/generated/docs/ChatCompletionResponse.md new file mode 100644 index 00000000..bff9c23b --- /dev/null +++ b/src/together/generated/docs/ChatCompletionResponse.md @@ -0,0 +1,32 @@ +# ChatCompletionResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**choices** | [**List[ChatCompletionChoicesDataInner]**](ChatCompletionChoicesDataInner.md) | | +**usage** | [**UsageData**](UsageData.md) | | [optional] +**created** | **int** | | +**model** | **str** | | +**object** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_response import ChatCompletionResponse + 
+# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionResponse from a JSON string +chat_completion_response_instance = ChatCompletionResponse.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionResponse.to_json()) + +# convert the object into a dict +chat_completion_response_dict = chat_completion_response_instance.to_dict() +# create an instance of ChatCompletionResponse from a dict +chat_completion_response_from_dict = ChatCompletionResponse.from_dict(chat_completion_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionStream.md b/src/together/generated/docs/ChatCompletionStream.md new file mode 100644 index 00000000..0425981a --- /dev/null +++ b/src/together/generated/docs/ChatCompletionStream.md @@ -0,0 +1,27 @@ +# ChatCompletionStream + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_stream import ChatCompletionStream + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionStream from a JSON string +chat_completion_stream_instance = ChatCompletionStream.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionStream.to_json()) + +# convert the object into a dict +chat_completion_stream_dict = chat_completion_stream_instance.to_dict() +# create an instance of ChatCompletionStream from a dict +chat_completion_stream_from_dict = ChatCompletionStream.from_dict(chat_completion_stream_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git 
a/src/together/generated/docs/ChatCompletionSystemMessageParam.md b/src/together/generated/docs/ChatCompletionSystemMessageParam.md new file mode 100644 index 00000000..b1b6ee2e --- /dev/null +++ b/src/together/generated/docs/ChatCompletionSystemMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionSystemMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_system_message_param import ChatCompletionSystemMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionSystemMessageParam from a JSON string +chat_completion_system_message_param_instance = ChatCompletionSystemMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionSystemMessageParam.to_json()) + +# convert the object into a dict +chat_completion_system_message_param_dict = chat_completion_system_message_param_instance.to_dict() +# create an instance of ChatCompletionSystemMessageParam from a dict +chat_completion_system_message_param_from_dict = ChatCompletionSystemMessageParam.from_dict(chat_completion_system_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToken.md b/src/together/generated/docs/ChatCompletionToken.md new file mode 100644 index 00000000..159ba763 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToken.md @@ -0,0 +1,30 @@ +# ChatCompletionToken + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **int** | | +**text** | **str** | | +**logprob** | **float** | | +**special** | **bool** | | + +## 
Example + +```python +from together.generated.models.chat_completion_token import ChatCompletionToken + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToken from a JSON string +chat_completion_token_instance = ChatCompletionToken.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToken.to_json()) + +# convert the object into a dict +chat_completion_token_dict = chat_completion_token_instance.to_dict() +# create an instance of ChatCompletionToken from a dict +chat_completion_token_from_dict = ChatCompletionToken.from_dict(chat_completion_token_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionTool.md b/src/together/generated/docs/ChatCompletionTool.md new file mode 100644 index 00000000..ff0a341f --- /dev/null +++ b/src/together/generated/docs/ChatCompletionTool.md @@ -0,0 +1,28 @@ +# ChatCompletionTool + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**function** | [**ChatCompletionToolFunction**](ChatCompletionToolFunction.md) | | + +## Example + +```python +from together.generated.models.chat_completion_tool import ChatCompletionTool + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionTool from a JSON string +chat_completion_tool_instance = ChatCompletionTool.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionTool.to_json()) + +# convert the object into a dict +chat_completion_tool_dict = chat_completion_tool_instance.to_dict() +# create an instance of ChatCompletionTool from a dict +chat_completion_tool_from_dict = ChatCompletionTool.from_dict(chat_completion_tool_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolFunction.md b/src/together/generated/docs/ChatCompletionToolFunction.md new file mode 100644 index 00000000..a84fb6e5 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToolFunction.md @@ -0,0 +1,29 @@ +# ChatCompletionToolFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**description** | **str** | | [optional] +**name** | **str** | | +**parameters** | **Dict[str, object]** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_tool_function import ChatCompletionToolFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToolFunction from a JSON string +chat_completion_tool_function_instance = ChatCompletionToolFunction.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToolFunction.to_json()) + +# convert the object into a dict +chat_completion_tool_function_dict = chat_completion_tool_function_instance.to_dict() +# create an instance of ChatCompletionToolFunction from a dict +chat_completion_tool_function_from_dict = ChatCompletionToolFunction.from_dict(chat_completion_tool_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionToolMessageParam.md b/src/together/generated/docs/ChatCompletionToolMessageParam.md new file mode 100644 index 00000000..6e45af91 --- /dev/null +++ b/src/together/generated/docs/ChatCompletionToolMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionToolMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**role** | **str** | 
| +**content** | **str** | | +**tool_call_id** | **str** | | + +## Example + +```python +from together.generated.models.chat_completion_tool_message_param import ChatCompletionToolMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionToolMessageParam from a JSON string +chat_completion_tool_message_param_instance = ChatCompletionToolMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionToolMessageParam.to_json()) + +# convert the object into a dict +chat_completion_tool_message_param_dict = chat_completion_tool_message_param_instance.to_dict() +# create an instance of ChatCompletionToolMessageParam from a dict +chat_completion_tool_message_param_from_dict = ChatCompletionToolMessageParam.from_dict(chat_completion_tool_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ChatCompletionUserMessageParam.md b/src/together/generated/docs/ChatCompletionUserMessageParam.md new file mode 100644 index 00000000..1720154b --- /dev/null +++ b/src/together/generated/docs/ChatCompletionUserMessageParam.md @@ -0,0 +1,29 @@ +# ChatCompletionUserMessageParam + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | +**role** | **str** | | +**name** | **str** | | [optional] + +## Example + +```python +from together.generated.models.chat_completion_user_message_param import ChatCompletionUserMessageParam + +# TODO update the JSON string below +json = "{}" +# create an instance of ChatCompletionUserMessageParam from a JSON string +chat_completion_user_message_param_instance = ChatCompletionUserMessageParam.from_json(json) +# print the JSON string representation of the object +print(ChatCompletionUserMessageParam.to_json()) + +# 
convert the object into a dict +chat_completion_user_message_param_dict = chat_completion_user_message_param_instance.to_dict() +# create an instance of ChatCompletionUserMessageParam from a dict +chat_completion_user_message_param_from_dict = ChatCompletionUserMessageParam.from_dict(chat_completion_user_message_param_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionApi.md b/src/together/generated/docs/CompletionApi.md new file mode 100644 index 00000000..3e0ef088 --- /dev/null +++ b/src/together/generated/docs/CompletionApi.md @@ -0,0 +1,93 @@ +# together.generated.CompletionApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**completions**](CompletionApi.md#completions) | **POST** /completions | Create completion + + +# **completions** +> CompletionResponse completions(completion_request=completion_request) + +Create completion + +Query a language, code, or image model. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_response import CompletionResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.CompletionApi(api_client) + completion_request = together.generated.CompletionRequest() # CompletionRequest | (optional) + + try: + # Create completion + api_response = await api_instance.completions(completion_request=completion_request) + print("The response of CompletionApi->completions:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling CompletionApi->completions: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **completion_request** | [**CompletionRequest**](CompletionRequest.md)| | [optional] + +### Return type + +[**CompletionResponse**](CompletionResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/event-stream + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoice.md b/src/together/generated/docs/CompletionChoice.md new file mode 100644 index 00000000..8c8f978c --- /dev/null +++ b/src/together/generated/docs/CompletionChoice.md @@ -0,0 +1,27 @@ +# CompletionChoice + + 
+## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] + +## Example + +```python +from together.generated.models.completion_choice import CompletionChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChoice from a JSON string +completion_choice_instance = CompletionChoice.from_json(json) +# print the JSON string representation of the object +print(CompletionChoice.to_json()) + +# convert the object into a dict +completion_choice_dict = completion_choice_instance.to_dict() +# create an instance of CompletionChoice from a dict +completion_choice_from_dict = CompletionChoice.from_dict(completion_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChoicesDataInner.md b/src/together/generated/docs/CompletionChoicesDataInner.md new file mode 100644 index 00000000..370136e4 --- /dev/null +++ b/src/together/generated/docs/CompletionChoicesDataInner.md @@ -0,0 +1,30 @@ +# CompletionChoicesDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | [optional] +**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] + +## Example + +```python +from together.generated.models.completion_choices_data_inner import CompletionChoicesDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChoicesDataInner from a JSON string +completion_choices_data_inner_instance = CompletionChoicesDataInner.from_json(json) +# print the JSON string representation of the object +print(CompletionChoicesDataInner.to_json()) + +# convert the object into a 
dict +completion_choices_data_inner_dict = completion_choices_data_inner_instance.to_dict() +# create an instance of CompletionChoicesDataInner from a dict +completion_choices_data_inner_from_dict = CompletionChoicesDataInner.from_dict(completion_choices_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunk.md b/src/together/generated/docs/CompletionChunk.md new file mode 100644 index 00000000..2d9fe0e5 --- /dev/null +++ b/src/together/generated/docs/CompletionChunk.md @@ -0,0 +1,32 @@ +# CompletionChunk + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**token** | [**CompletionToken**](CompletionToken.md) | | +**choices** | [**List[CompletionChoice]**](CompletionChoice.md) | | +**usage** | [**CompletionChunkUsage**](CompletionChunkUsage.md) | | +**seed** | **int** | | [optional] +**finish_reason** | [**FinishReason**](FinishReason.md) | | + +## Example + +```python +from together.generated.models.completion_chunk import CompletionChunk + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChunk from a JSON string +completion_chunk_instance = CompletionChunk.from_json(json) +# print the JSON string representation of the object +print(CompletionChunk.to_json()) + +# convert the object into a dict +completion_chunk_dict = completion_chunk_instance.to_dict() +# create an instance of CompletionChunk from a dict +completion_chunk_from_dict = CompletionChunk.from_dict(completion_chunk_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionChunkUsage.md 
b/src/together/generated/docs/CompletionChunkUsage.md new file mode 100644 index 00000000..3e74c9f4 --- /dev/null +++ b/src/together/generated/docs/CompletionChunkUsage.md @@ -0,0 +1,29 @@ +# CompletionChunkUsage + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt_tokens** | **int** | | +**completion_tokens** | **int** | | +**total_tokens** | **int** | | + +## Example + +```python +from together.generated.models.completion_chunk_usage import CompletionChunkUsage + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionChunkUsage from a JSON string +completion_chunk_usage_instance = CompletionChunkUsage.from_json(json) +# print the JSON string representation of the object +print(CompletionChunkUsage.to_json()) + +# convert the object into a dict +completion_chunk_usage_dict = completion_chunk_usage_instance.to_dict() +# create an instance of CompletionChunkUsage from a dict +completion_chunk_usage_from_dict = CompletionChunkUsage.from_dict(completion_chunk_usage_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionEvent.md b/src/together/generated/docs/CompletionEvent.md new file mode 100644 index 00000000..92acef57 --- /dev/null +++ b/src/together/generated/docs/CompletionEvent.md @@ -0,0 +1,27 @@ +# CompletionEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**CompletionChunk**](CompletionChunk.md) | | + +## Example + +```python +from together.generated.models.completion_event import CompletionEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionEvent from a JSON string +completion_event_instance = CompletionEvent.from_json(json) +# print the JSON string 
representation of the object +print(CompletionEvent.to_json()) + +# convert the object into a dict +completion_event_dict = completion_event_instance.to_dict() +# create an instance of CompletionEvent from a dict +completion_event_from_dict = CompletionEvent.from_dict(completion_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequest.md b/src/together/generated/docs/CompletionRequest.md new file mode 100644 index 00000000..0af19ec6 --- /dev/null +++ b/src/together/generated/docs/CompletionRequest.md @@ -0,0 +1,44 @@ +# CompletionRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt** | **str** | A string providing context for the model to complete. | +**model** | [**CompletionRequestModel**](CompletionRequestModel.md) | | +**max_tokens** | **int** | The maximum number of tokens to generate. | [optional] +**stop** | **List[str]** | A list of string sequences that will truncate (stop) inference text output. For example, \"</s>\" will stop generation as soon as the model generates the given token. | [optional] +**temperature** | **float** | A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output. | [optional] +**top_p** | **float** | A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text. 
| [optional] +**top_k** | **int** | An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options. | [optional] +**repetition_penalty** | **float** | A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition. | [optional] +**stream** | **bool** | If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results. | [optional] +**logprobs** | **int** | Determines the number of most likely tokens to return at each token position log probabilities to return. | [optional] +**echo** | **bool** | If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs. | [optional] +**n** | **int** | The number of completions to generate for each prompt. | [optional] +**safety_model** | [**CompletionRequestSafetyModel**](CompletionRequestSafetyModel.md) | | [optional] +**min_p** | **float** | A number between 0 and 1 that can be used as an alternative to top-p and top-k. | [optional] +**presence_penalty** | **float** | A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics. | [optional] +**frequency_penalty** | **float** | A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned. | [optional] +**logit_bias** | **Dict[str, float]** | Adjusts the likelihood of specific tokens appearing in the generated output. | [optional] +**seed** | **int** | Seed value for reproducibility. 
| [optional] + +## Example + +```python +from together.generated.models.completion_request import CompletionRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequest from a JSON string +completion_request_instance = CompletionRequest.from_json(json) +# print the JSON string representation of the object +print(CompletionRequest.to_json()) + +# convert the object into a dict +completion_request_dict = completion_request_instance.to_dict() +# create an instance of CompletionRequest from a dict +completion_request_from_dict = CompletionRequest.from_dict(completion_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestModel.md b/src/together/generated/docs/CompletionRequestModel.md new file mode 100644 index 00000000..15040351 --- /dev/null +++ b/src/together/generated/docs/CompletionRequestModel.md @@ -0,0 +1,27 @@ +# CompletionRequestModel + +The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.completion_request_model import CompletionRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequestModel from a JSON string +completion_request_model_instance = CompletionRequestModel.from_json(json) +# print the JSON string representation of the object +print(CompletionRequestModel.to_json()) + +# convert the object into a dict +completion_request_model_dict = completion_request_model_instance.to_dict() +# create an instance of CompletionRequestModel from a dict +completion_request_model_from_dict = CompletionRequestModel.from_dict(completion_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionRequestSafetyModel.md b/src/together/generated/docs/CompletionRequestSafetyModel.md new file mode 100644 index 00000000..a5b83b73 --- /dev/null +++ b/src/together/generated/docs/CompletionRequestSafetyModel.md @@ -0,0 +1,27 @@ +# CompletionRequestSafetyModel + +The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.completion_request_safety_model import CompletionRequestSafetyModel + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionRequestSafetyModel from a JSON string +completion_request_safety_model_instance = CompletionRequestSafetyModel.from_json(json) +# print the JSON string representation of the object +print(CompletionRequestSafetyModel.to_json()) + +# convert the object into a dict +completion_request_safety_model_dict = completion_request_safety_model_instance.to_dict() +# create an instance of CompletionRequestSafetyModel from a dict +completion_request_safety_model_from_dict = CompletionRequestSafetyModel.from_dict(completion_request_safety_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionResponse.md b/src/together/generated/docs/CompletionResponse.md new file mode 100644 index 00000000..6170558e --- /dev/null +++ b/src/together/generated/docs/CompletionResponse.md @@ -0,0 +1,33 @@ +# CompletionResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**choices** | [**List[CompletionChoicesDataInner]**](CompletionChoicesDataInner.md) | | +**prompt** | [**List[PromptPartInner]**](PromptPartInner.md) | | [optional] +**usage** | [**UsageData**](UsageData.md) | | +**created** | **int** | | +**model** | **str** | | +**object** | **str** | | + +## Example + +```python +from together.generated.models.completion_response import CompletionResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionResponse from a JSON string +completion_response_instance = 
CompletionResponse.from_json(json) +# print the JSON string representation of the object +print(CompletionResponse.to_json()) + +# convert the object into a dict +completion_response_dict = completion_response_instance.to_dict() +# create an instance of CompletionResponse from a dict +completion_response_from_dict = CompletionResponse.from_dict(completion_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionStream.md b/src/together/generated/docs/CompletionStream.md new file mode 100644 index 00000000..aa6342a9 --- /dev/null +++ b/src/together/generated/docs/CompletionStream.md @@ -0,0 +1,27 @@ +# CompletionStream + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.completion_stream import CompletionStream + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionStream from a JSON string +completion_stream_instance = CompletionStream.from_json(json) +# print the JSON string representation of the object +print(CompletionStream.to_json()) + +# convert the object into a dict +completion_stream_dict = completion_stream_instance.to_dict() +# create an instance of CompletionStream from a dict +completion_stream_from_dict = CompletionStream.from_dict(completion_stream_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CompletionToken.md b/src/together/generated/docs/CompletionToken.md new file mode 100644 index 00000000..f4d5f6b9 --- /dev/null +++ b/src/together/generated/docs/CompletionToken.md @@ -0,0 +1,30 @@ +# CompletionToken + + +## Properties + +Name 
| Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **int** | | +**text** | **str** | | +**logprob** | **float** | | +**special** | **bool** | | + +## Example + +```python +from together.generated.models.completion_token import CompletionToken + +# TODO update the JSON string below +json = "{}" +# create an instance of CompletionToken from a JSON string +completion_token_instance = CompletionToken.from_json(json) +# print the JSON string representation of the object +print(CompletionToken.to_json()) + +# convert the object into a dict +completion_token_dict = completion_token_instance.to_dict() +# create an instance of CompletionToken from a dict +completion_token_from_dict = CompletionToken.from_dict(completion_token_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/CreateEndpointRequest.md b/src/together/generated/docs/CreateEndpointRequest.md new file mode 100644 index 00000000..add9632f --- /dev/null +++ b/src/together/generated/docs/CreateEndpointRequest.md @@ -0,0 +1,33 @@ +# CreateEndpointRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**display_name** | **str** | A human-readable name for the endpoint | [optional] +**model** | **str** | The model to deploy on this endpoint | +**hardware** | **str** | The hardware configuration to use for this endpoint | +**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | +**disable_prompt_cache** | **bool** | Whether to disable the prompt cache for this endpoint | [optional] [default to False] +**disable_speculative_decoding** | **bool** | Whether to disable speculative decoding for this endpoint | [optional] [default to False] +**state** | **str** | The desired state of the 
endpoint | [optional] [default to 'STARTED'] + +## Example + +```python +from together.generated.models.create_endpoint_request import CreateEndpointRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of CreateEndpointRequest from a JSON string +create_endpoint_request_instance = CreateEndpointRequest.from_json(json) +# print the JSON string representation of the object +print(CreateEndpointRequest.to_json()) + +# convert the object into a dict +create_endpoint_request_dict = create_endpoint_request_instance.to_dict() +# create an instance of CreateEndpointRequest from a dict +create_endpoint_request_from_dict = CreateEndpointRequest.from_dict(create_endpoint_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/DedicatedEndpoint.md b/src/together/generated/docs/DedicatedEndpoint.md new file mode 100644 index 00000000..eb9079f0 --- /dev/null +++ b/src/together/generated/docs/DedicatedEndpoint.md @@ -0,0 +1,38 @@ +# DedicatedEndpoint + +Details about a dedicated endpoint deployment + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | The type of object | +**id** | **str** | Unique identifier for the endpoint | +**name** | **str** | System name for the endpoint | +**display_name** | **str** | Human-readable name for the endpoint | +**model** | **str** | The model deployed on this endpoint | +**hardware** | **str** | The hardware configuration used for this endpoint | +**type** | **str** | The type of endpoint | +**owner** | **str** | The owner of this endpoint | +**state** | **str** | Current state of the endpoint | +**autoscaling** | [**Autoscaling**](Autoscaling.md) | Configuration for automatic scaling of the endpoint | +**created_at** | **datetime** | Timestamp when the endpoint was 
created | + +## Example + +```python +from together.generated.models.dedicated_endpoint import DedicatedEndpoint + +# TODO update the JSON string below +json = "{}" +# create an instance of DedicatedEndpoint from a JSON string +dedicated_endpoint_instance = DedicatedEndpoint.from_json(json) +# print the JSON string representation of the object +print(DedicatedEndpoint.to_json()) + +# convert the object into a dict +dedicated_endpoint_dict = dedicated_endpoint_instance.to_dict() +# create an instance of DedicatedEndpoint from a dict +dedicated_endpoint_from_dict = DedicatedEndpoint.from_dict(dedicated_endpoint_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsApi.md b/src/together/generated/docs/EmbeddingsApi.md new file mode 100644 index 00000000..3035a5ac --- /dev/null +++ b/src/together/generated/docs/EmbeddingsApi.md @@ -0,0 +1,93 @@ +# together.generated.EmbeddingsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**embeddings**](EmbeddingsApi.md#embeddings) | **POST** /embeddings | Create embedding + + +# **embeddings** +> EmbeddingsResponse embeddings(embeddings_request=embeddings_request) + +Create embedding + +Query an embedding model for a given string of text. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EmbeddingsApi(api_client) + embeddings_request = together.generated.EmbeddingsRequest() # EmbeddingsRequest | (optional) + + try: + # Create embedding + api_response = await api_instance.embeddings(embeddings_request=embeddings_request) + print("The response of EmbeddingsApi->embeddings:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EmbeddingsApi->embeddings: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **embeddings_request** | [**EmbeddingsRequest**](EmbeddingsRequest.md)| | [optional] + +### Return type + +[**EmbeddingsResponse**](EmbeddingsResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequest.md b/src/together/generated/docs/EmbeddingsRequest.md new file mode 100644 index 00000000..f14bc778 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequest.md @@ -0,0 +1,28 @@ +# EmbeddingsRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model** | [**EmbeddingsRequestModel**](EmbeddingsRequestModel.md) | | +**input** | [**EmbeddingsRequestInput**](EmbeddingsRequestInput.md) | | + +## Example + +```python +from together.generated.models.embeddings_request import EmbeddingsRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsRequest from a JSON string +embeddings_request_instance = EmbeddingsRequest.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequest.to_json()) + +# convert the object into a dict +embeddings_request_dict = embeddings_request_instance.to_dict() +# create an instance of EmbeddingsRequest from a dict +embeddings_request_from_dict = EmbeddingsRequest.from_dict(embeddings_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestInput.md b/src/together/generated/docs/EmbeddingsRequestInput.md new file mode 100644 index 00000000..e3b4af93 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequestInput.md @@ -0,0 +1,26 @@ +# EmbeddingsRequestInput + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput + +# TODO update the JSON string below +json = "{}" +# create an instance of 
EmbeddingsRequestInput from a JSON string +embeddings_request_input_instance = EmbeddingsRequestInput.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequestInput.to_json()) + +# convert the object into a dict +embeddings_request_input_dict = embeddings_request_input_instance.to_dict() +# create an instance of EmbeddingsRequestInput from a dict +embeddings_request_input_from_dict = EmbeddingsRequestInput.from_dict(embeddings_request_input_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsRequestModel.md b/src/together/generated/docs/EmbeddingsRequestModel.md new file mode 100644 index 00000000..6376e042 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsRequestModel.md @@ -0,0 +1,27 @@ +# EmbeddingsRequestModel + +The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsRequestModel from a JSON string +embeddings_request_model_instance = EmbeddingsRequestModel.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsRequestModel.to_json()) + +# convert the object into a dict +embeddings_request_model_dict = embeddings_request_model_instance.to_dict() +# create an instance of EmbeddingsRequestModel from a dict +embeddings_request_model_from_dict = EmbeddingsRequestModel.from_dict(embeddings_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponse.md b/src/together/generated/docs/EmbeddingsResponse.md new file mode 100644 index 00000000..0e08c129 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsResponse.md @@ -0,0 +1,29 @@ +# EmbeddingsResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**model** | **str** | | +**data** | [**List[EmbeddingsResponseDataInner]**](EmbeddingsResponseDataInner.md) | | + +## Example + +```python +from together.generated.models.embeddings_response import EmbeddingsResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsResponse from a JSON string +embeddings_response_instance = EmbeddingsResponse.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsResponse.to_json()) + +# convert the 
object into a dict +embeddings_response_dict = embeddings_response_instance.to_dict() +# create an instance of EmbeddingsResponse from a dict +embeddings_response_from_dict = EmbeddingsResponse.from_dict(embeddings_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EmbeddingsResponseDataInner.md b/src/together/generated/docs/EmbeddingsResponseDataInner.md new file mode 100644 index 00000000..cc11de78 --- /dev/null +++ b/src/together/generated/docs/EmbeddingsResponseDataInner.md @@ -0,0 +1,29 @@ +# EmbeddingsResponseDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**embedding** | **List[float]** | | +**index** | **int** | | + +## Example + +```python +from together.generated.models.embeddings_response_data_inner import EmbeddingsResponseDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of EmbeddingsResponseDataInner from a JSON string +embeddings_response_data_inner_instance = EmbeddingsResponseDataInner.from_json(json) +# print the JSON string representation of the object +print(EmbeddingsResponseDataInner.to_json()) + +# convert the object into a dict +embeddings_response_data_inner_dict = embeddings_response_data_inner_instance.to_dict() +# create an instance of EmbeddingsResponseDataInner from a dict +embeddings_response_data_inner_from_dict = EmbeddingsResponseDataInner.from_dict(embeddings_response_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointPricing.md b/src/together/generated/docs/EndpointPricing.md new file mode 100644 index 00000000..e557beff --- /dev/null +++ 
b/src/together/generated/docs/EndpointPricing.md @@ -0,0 +1,28 @@ +# EndpointPricing + +Pricing details for using an endpoint + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**cents_per_minute** | **float** | Cost per minute of endpoint uptime in cents | + +## Example + +```python +from together.generated.models.endpoint_pricing import EndpointPricing + +# TODO update the JSON string below +json = "{}" +# create an instance of EndpointPricing from a JSON string +endpoint_pricing_instance = EndpointPricing.from_json(json) +# print the JSON string representation of the object +print(EndpointPricing.to_json()) + +# convert the object into a dict +endpoint_pricing_dict = endpoint_pricing_instance.to_dict() +# create an instance of EndpointPricing from a dict +endpoint_pricing_from_dict = EndpointPricing.from_dict(endpoint_pricing_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/EndpointsApi.md b/src/together/generated/docs/EndpointsApi.md new file mode 100644 index 00000000..74fc70f1 --- /dev/null +++ b/src/together/generated/docs/EndpointsApi.md @@ -0,0 +1,416 @@ +# together.generated.EndpointsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_endpoint**](EndpointsApi.md#create_endpoint) | **POST** /endpoints | Create a dedicated endpoint, it will start automatically +[**delete_endpoint**](EndpointsApi.md#delete_endpoint) | **DELETE** /endpoints/{endpointId} | Delete endpoint +[**get_endpoint**](EndpointsApi.md#get_endpoint) | **GET** /endpoints/{endpointId} | Get endpoint by ID +[**list_endpoints**](EndpointsApi.md#list_endpoints) | **GET** /endpoints | List all endpoints, can be filtered by type 
+[**update_endpoint**](EndpointsApi.md#update_endpoint) | **PATCH** /endpoints/{endpointId} | Update endpoint, this can also be used to start or stop a dedicated endpoint + + +# **create_endpoint** +> DedicatedEndpoint create_endpoint(create_endpoint_request) + +Create a dedicated endpoint, it will start automatically + +Creates a new dedicated endpoint for serving models. The endpoint will automatically start after creation. You can deploy any supported model on hardware configurations that meet the model's requirements. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + create_endpoint_request = together.generated.CreateEndpointRequest() # CreateEndpointRequest | + + try: + # Create a dedicated endpoint, it will start automatically + api_response = await api_instance.create_endpoint(create_endpoint_request) + print("The response of EndpointsApi->create_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->create_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **create_endpoint_request** | [**CreateEndpointRequest**](CreateEndpointRequest.md)| | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **delete_endpoint** +> delete_endpoint(endpoint_id) + +Delete endpoint + +Permanently deletes an endpoint. This action cannot be undone. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to delete + + try: + # Delete endpoint + await api_instance.delete_endpoint(endpoint_id) + except Exception as e: + print("Exception when calling EndpointsApi->delete_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to delete | + +### Return type + +void (empty response body) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**204** | No Content - Endpoint successfully deleted | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - 
| + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **get_endpoint** +> DedicatedEndpoint get_endpoint(endpoint_id) + +Get endpoint by ID + +Retrieves details about a specific endpoint, including its current state, configuration, and scaling settings. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to retrieve + + try: + # Get endpoint by ID + api_response = await api_instance.get_endpoint(endpoint_id) + print("The response of EndpointsApi->get_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->get_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to retrieve | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **list_endpoints** +> ListEndpoints200Response list_endpoints(type=type) + +List all endpoints, can be filtered by type + +Returns a list of all endpoints associated with your account. You can filter the results by type (dedicated or serverless). 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.list_endpoints200_response import ListEndpoints200Response +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + type = 'dedicated' # str | Filter endpoints by type (optional) + + try: + # List all endpoints, can be filtered by type + api_response = await api_instance.list_endpoints(type=type) + print("The response of EndpointsApi->list_endpoints:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->list_endpoints: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **type** | **str**| Filter endpoints by type | [optional] + +### Return type + +[**ListEndpoints200Response**](ListEndpoints200Response.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response 
headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **update_endpoint** +> DedicatedEndpoint update_endpoint(endpoint_id, update_endpoint_request) + +Update endpoint, this can also be used to start or stop a dedicated endpoint + +Updates an existing endpoint's configuration. You can modify the display name, autoscaling settings, or change the endpoint's state (start/stop). + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.EndpointsApi(api_client) + endpoint_id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7' # str | The ID of the endpoint to update + update_endpoint_request = together.generated.UpdateEndpointRequest() # UpdateEndpointRequest | + + try: + # Update endpoint, this can also be used to start or stop a dedicated endpoint + api_response = await api_instance.update_endpoint(endpoint_id, update_endpoint_request) + print("The response of EndpointsApi->update_endpoint:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling EndpointsApi->update_endpoint: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **endpoint_id** | **str**| The ID of the endpoint to update | + **update_endpoint_request** | [**UpdateEndpointRequest**](UpdateEndpointRequest.md)| | + +### Return type + +[**DedicatedEndpoint**](DedicatedEndpoint.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**403** | Unauthorized | - | +**404** | Not Found | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorData.md b/src/together/generated/docs/ErrorData.md new file mode 100644 
index 00000000..d29d8ec3 --- /dev/null +++ b/src/together/generated/docs/ErrorData.md @@ -0,0 +1,27 @@ +# ErrorData + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**error** | [**ErrorDataError**](ErrorDataError.md) | | + +## Example + +```python +from together.generated.models.error_data import ErrorData + +# TODO update the JSON string below +json = "{}" +# create an instance of ErrorData from a JSON string +error_data_instance = ErrorData.from_json(json) +# print the JSON string representation of the object +print(ErrorData.to_json()) + +# convert the object into a dict +error_data_dict = error_data_instance.to_dict() +# create an instance of ErrorData from a dict +error_data_from_dict = ErrorData.from_dict(error_data_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ErrorDataError.md b/src/together/generated/docs/ErrorDataError.md new file mode 100644 index 00000000..d4990950 --- /dev/null +++ b/src/together/generated/docs/ErrorDataError.md @@ -0,0 +1,30 @@ +# ErrorDataError + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**message** | **str** | | +**type** | **str** | | +**param** | **str** | | [optional] +**code** | **str** | | [optional] + +## Example + +```python +from together.generated.models.error_data_error import ErrorDataError + +# TODO update the JSON string below +json = "{}" +# create an instance of ErrorDataError from a JSON string +error_data_error_instance = ErrorDataError.from_json(json) +# print the JSON string representation of the object +print(ErrorDataError.to_json()) + +# convert the object into a dict +error_data_error_dict = error_data_error_instance.to_dict() +# create an instance of ErrorDataError from a dict 
+error_data_error_from_dict = ErrorDataError.from_dict(error_data_error_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileDeleteResponse.md b/src/together/generated/docs/FileDeleteResponse.md new file mode 100644 index 00000000..e20ba15d --- /dev/null +++ b/src/together/generated/docs/FileDeleteResponse.md @@ -0,0 +1,28 @@ +# FileDeleteResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | [optional] +**deleted** | **bool** | | [optional] + +## Example + +```python +from together.generated.models.file_delete_response import FileDeleteResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FileDeleteResponse from a JSON string +file_delete_response_instance = FileDeleteResponse.from_json(json) +# print the JSON string representation of the object +print(FileDeleteResponse.to_json()) + +# convert the object into a dict +file_delete_response_dict = file_delete_response_instance.to_dict() +# create an instance of FileDeleteResponse from a dict +file_delete_response_from_dict = FileDeleteResponse.from_dict(file_delete_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileList.md b/src/together/generated/docs/FileList.md new file mode 100644 index 00000000..83bc78a6 --- /dev/null +++ b/src/together/generated/docs/FileList.md @@ -0,0 +1,27 @@ +# FileList + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FileResponse]**](FileResponse.md) | | + +## Example + +```python +from together.generated.models.file_list import FileList + 
+# TODO update the JSON string below +json = "{}" +# create an instance of FileList from a JSON string +file_list_instance = FileList.from_json(json) +# print the JSON string representation of the object +print(FileList.to_json()) + +# convert the object into a dict +file_list_dict = file_list_instance.to_dict() +# create an instance of FileList from a dict +file_list_from_dict = FileList.from_dict(file_list_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileObject.md b/src/together/generated/docs/FileObject.md new file mode 100644 index 00000000..a95e1fe1 --- /dev/null +++ b/src/together/generated/docs/FileObject.md @@ -0,0 +1,30 @@ +# FileObject + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | [optional] +**id** | **str** | | [optional] +**filename** | **str** | | [optional] +**size** | **int** | | [optional] + +## Example + +```python +from together.generated.models.file_object import FileObject + +# TODO update the JSON string below +json = "{}" +# create an instance of FileObject from a JSON string +file_object_instance = FileObject.from_json(json) +# print the JSON string representation of the object +print(FileObject.to_json()) + +# convert the object into a dict +file_object_dict = file_object_instance.to_dict() +# create an instance of FileObject from a dict +file_object_from_dict = FileObject.from_dict(file_object_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FileResponse.md b/src/together/generated/docs/FileResponse.md new file mode 100644 index 00000000..88317020 --- /dev/null +++ b/src/together/generated/docs/FileResponse.md @@ 
-0,0 +1,35 @@ +# FileResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created_at** | **int** | | +**filename** | **str** | | +**bytes** | **int** | | +**purpose** | **str** | | +**processed** | **bool** | | +**file_type** | **str** | | +**line_count** | **int** | | + +## Example + +```python +from together.generated.models.file_response import FileResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FileResponse from a JSON string +file_response_instance = FileResponse.from_json(json) +# print the JSON string representation of the object +print(FileResponse.to_json()) + +# convert the object into a dict +file_response_dict = file_response_instance.to_dict() +# create an instance of FileResponse from a dict +file_response_from_dict = FileResponse.from_dict(file_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FilesApi.md b/src/together/generated/docs/FilesApi.md new file mode 100644 index 00000000..5d0e6962 --- /dev/null +++ b/src/together/generated/docs/FilesApi.md @@ -0,0 +1,320 @@ +# together.generated.FilesApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**files_get**](FilesApi.md#files_get) | **GET** /files | List all files +[**files_id_content_get**](FilesApi.md#files_id_content_get) | **GET** /files/{id}/content | Get file contents +[**files_id_delete**](FilesApi.md#files_id_delete) | **DELETE** /files/{id} | Delete a file +[**files_id_get**](FilesApi.md#files_id_get) | **GET** /files/{id} | List file + + +# **files_get** +> FileList files_get() + +List all files + +List the metadata for all uploaded data files. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_list import FileList +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + + try: + # List all files + api_response = await api_instance.files_get() + print("The response of FilesApi->files_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**FileList**](FileList.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of files | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_content_get** +> FileObject files_id_content_get(id) + +Get file contents + +Get the contents of a single uploaded data file. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_object import FileObject +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # Get file contents + api_response = await api_instance.files_id_content_get(id) + print("The response of FilesApi->files_id_content_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_content_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileObject**](FileObject.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File content retrieved successfully | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_delete** +> FileDeleteResponse files_id_delete(id) + +Delete a file + +Delete a previously uploaded data file. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # Delete a file + api_response = await api_instance.files_id_delete(id) + print("The response of FilesApi->files_id_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileDeleteResponse**](FileDeleteResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File deleted successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **files_id_get** +> FileResponse files_id_get(id) + +List file + +List the metadata for a single uploaded data file. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.file_response import FileResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FilesApi(api_client) + id = 'id_example' # str | + + try: + # List file + api_response = await api_instance.files_id_get(id) + print("The response of FilesApi->files_id_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FilesApi->files_id_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FileResponse**](FileResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | File retrieved successfully | - | + +[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuneEvent.md b/src/together/generated/docs/FineTuneEvent.md new file mode 100644 index 00000000..23eea549 --- /dev/null +++ b/src/together/generated/docs/FineTuneEvent.md @@ -0,0 +1,40 @@ +# FineTuneEvent + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**created_at** | **str** | | +**level** | [**FinetuneEventLevels**](FinetuneEventLevels.md) | | [optional] +**message** | **str** | | +**type** | [**FinetuneEventType**](FinetuneEventType.md) | | +**param_count** | **int** | | +**token_count** | **int** | | +**total_steps** | **int** | | +**wandb_url** | **str** | | +**step** | **int** | | +**checkpoint_path** | **str** | | +**model_path** | **str** | | +**training_offset** | **int** | | +**hash** | **str** | | + +## Example + +```python +from together.generated.models.fine_tune_event import FineTuneEvent + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTuneEvent from a JSON string +fine_tune_event_instance = FineTuneEvent.from_json(json) +# print the JSON string representation of the object +print(FineTuneEvent.to_json()) + +# convert the object into a dict +fine_tune_event_dict = fine_tune_event_instance.to_dict() +# create an instance of FineTuneEvent from a dict +fine_tune_event_from_dict = FineTuneEvent.from_dict(fine_tune_event_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequest.md b/src/together/generated/docs/FineTunesPostRequest.md new file mode 100644 index 00000000..f75f37cd --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequest.md @@ -0,0 +1,45 
@@ +# FineTunesPostRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**training_file** | **str** | File-ID of a training file uploaded to the Together API | +**validation_file** | **str** | File-ID of a validation file uploaded to the Together API | [optional] +**model** | **str** | Name of the base model to run fine-tune job on | +**n_epochs** | **int** | Number of epochs for fine-tuning | [optional] [default to 1] +**n_checkpoints** | **int** | Number of checkpoints to save during fine-tuning | [optional] [default to 1] +**n_evals** | **int** | Number of evaluations to be run on a given validation set during training | [optional] [default to 0] +**batch_size** | **int** | Batch size for fine-tuning | [optional] [default to 32] +**learning_rate** | **float** | Learning rate multiplier to use for training | [optional] [default to 0.000010] +**lr_scheduler** | [**LRScheduler**](LRScheduler.md) | | [optional] +**warmup_ratio** | **float** | The percent of steps at the start of training to linearly increase the learning rate. | [optional] [default to 0.0] +**max_grad_norm** | **float** | Max gradient norm to be used for gradient clipping. Set to 0 to disable. | [optional] [default to 1.0] +**weight_decay** | **float** | Weight decay | [optional] [default to 0.0] +**suffix** | **str** | Suffix that will be added to your fine-tuned model name | [optional] +**wandb_api_key** | **str** | API key for Weights & Biases integration | [optional] +**wandb_base_url** | **str** | The base URL of a dedicated Weights & Biases instance. | [optional] +**wandb_project_name** | **str** | The Weights & Biases project for your run. If not specified, will use `together` as the project name. | [optional] +**wandb_name** | **str** | The Weights & Biases name for your run. 
| [optional] +**train_on_inputs** | [**FineTunesPostRequestTrainOnInputs**](FineTunesPostRequestTrainOnInputs.md) | | [optional] [default to False] +**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] + +## Example + +```python +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTunesPostRequest from a JSON string +fine_tunes_post_request_instance = FineTunesPostRequest.from_json(json) +# print the JSON string representation of the object +print(FineTunesPostRequest.to_json()) + +# convert the object into a dict +fine_tunes_post_request_dict = fine_tunes_post_request_instance.to_dict() +# create an instance of FineTunesPostRequest from a dict +fine_tunes_post_request_from_dict = FineTunesPostRequest.from_dict(fine_tunes_post_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md b/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md new file mode 100644 index 00000000..554e32a3 --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequestTrainOnInputs.md @@ -0,0 +1,27 @@ +# FineTunesPostRequestTrainOnInputs + +Whether to mask the user messages in conversational data or prompts in instruction data. 
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.fine_tunes_post_request_train_on_inputs import FineTunesPostRequestTrainOnInputs + +# TODO update the JSON string below +json = "{}" +# create an instance of FineTunesPostRequestTrainOnInputs from a JSON string +fine_tunes_post_request_train_on_inputs_instance = FineTunesPostRequestTrainOnInputs.from_json(json) +# print the JSON string representation of the object +print(FineTunesPostRequestTrainOnInputs.to_json()) + +# convert the object into a dict +fine_tunes_post_request_train_on_inputs_dict = fine_tunes_post_request_train_on_inputs_instance.to_dict() +# create an instance of FineTunesPostRequestTrainOnInputs from a dict +fine_tunes_post_request_train_on_inputs_from_dict = FineTunesPostRequestTrainOnInputs.from_dict(fine_tunes_post_request_train_on_inputs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTunesPostRequestTrainingType.md b/src/together/generated/docs/FineTunesPostRequestTrainingType.md new file mode 100644 index 00000000..92af3191 --- /dev/null +++ b/src/together/generated/docs/FineTunesPostRequestTrainingType.md @@ -0,0 +1,31 @@ +# FineTunesPostRequestTrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**lora_r** | **int** | | +**lora_alpha** | **int** | | +**lora_dropout** | **float** | | [optional] [default to 0.0] +**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] + +## Example + +```python +from together.generated.models.fine_tunes_post_request_training_type import FineTunesPostRequestTrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of 
FineTunesPostRequestTrainingType from a JSON string +fine_tunes_post_request_training_type_instance = FineTunesPostRequestTrainingType.from_json(json) +# print the JSON string representation of the object +print(FineTunesPostRequestTrainingType.to_json()) + +# convert the object into a dict +fine_tunes_post_request_training_type_dict = fine_tunes_post_request_training_type_instance.to_dict() +# create an instance of FineTunesPostRequestTrainingType from a dict +fine_tunes_post_request_training_type_from_dict = FineTunesPostRequestTrainingType.from_dict(fine_tunes_post_request_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FineTuningApi.md b/src/together/generated/docs/FineTuningApi.md new file mode 100644 index 00000000..465f9925 --- /dev/null +++ b/src/together/generated/docs/FineTuningApi.md @@ -0,0 +1,488 @@ +# together.generated.FineTuningApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**fine_tunes_get**](FineTuningApi.md#fine_tunes_get) | **GET** /fine-tunes | List all jobs +[**fine_tunes_id_cancel_post**](FineTuningApi.md#fine_tunes_id_cancel_post) | **POST** /fine-tunes/{id}/cancel | Cancel job +[**fine_tunes_id_events_get**](FineTuningApi.md#fine_tunes_id_events_get) | **GET** /fine-tunes/{id}/events | List job events +[**fine_tunes_id_get**](FineTuningApi.md#fine_tunes_id_get) | **GET** /fine-tunes/{id} | List job +[**fine_tunes_post**](FineTuningApi.md#fine_tunes_post) | **POST** /fine-tunes | Create job +[**finetune_download_get**](FineTuningApi.md#finetune_download_get) | **GET** /finetune/download | Download model + + +# **fine_tunes_get** +> FinetuneList fine_tunes_get() + +List all jobs + +List the metadata for all fine-tuning jobs. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_list import FinetuneList +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + + try: + # List all jobs + api_response = await api_instance.fine_tunes_get() + print("The response of FineTuningApi->fine_tunes_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**FinetuneList**](FinetuneList.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of fine-tune jobs | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_cancel_post** +> FinetuneResponse fine_tunes_id_cancel_post(id) + +Cancel job + +Cancel a currently running fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + id = 'id_example' # str | Fine-tune ID to cancel. A string that starts with `ft-`. 
+ + try: + # Cancel job + api_response = await api_instance.fine_tunes_id_cancel_post(id) + print("The response of FineTuningApi->fine_tunes_id_cancel_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_id_cancel_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| Fine-tune ID to cancel. A string that starts with `ft-`. | + +### Return type + +[**FinetuneResponse**](FinetuneResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Successfully cancelled the fine-tuning job. | - | +**400** | Invalid request parameters. | - | +**404** | Fine-tune ID not found. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_events_get** +> FinetuneListEvents fine_tunes_id_events_get(id) + +List job events + +List the events for a single fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. 
+# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + id = 'id_example' # str | + + try: + # List job events + api_response = await api_instance.fine_tunes_id_events_get(id) + print("The response of FineTuningApi->fine_tunes_id_events_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_id_events_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FinetuneListEvents**](FinetuneListEvents.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of fine-tune events | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_id_get** +> FinetuneResponse fine_tunes_id_get(id) + +List job + +List the metadata for a single fine-tuning job. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + id = 'id_example' # str | + + try: + # List job + api_response = await api_instance.fine_tunes_id_get(id) + print("The response of FineTuningApi->fine_tunes_id_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_id_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +[**FinetuneResponse**](FinetuneResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Fine-tune job details retrieved successfully | - | + +[[Back to top]](#) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **fine_tunes_post** +> FinetuneResponse fine_tunes_post(fine_tunes_post_request) + +Create job + +Use a model to create a fine-tuning job. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + fine_tunes_post_request = together.generated.FineTunesPostRequest() # FineTunesPostRequest | + + try: + # Create job + api_response = await api_instance.fine_tunes_post(fine_tunes_post_request) + print("The response of FineTuningApi->fine_tunes_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->fine_tunes_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **fine_tunes_post_request** | [**FineTunesPostRequest**](FineTunesPostRequest.md)| | + +### Return type + +[**FinetuneResponse**](FinetuneResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Fine-tuning job initiated successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **finetune_download_get** +> FinetuneDownloadResult finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) + +Download model + +Download a compressed fine-tuned model or checkpoint to local disk. 
+ +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.FineTuningApi(api_client) + ft_id = 'ft_id_example' # str | Fine-tune ID to download. A string that starts with `ft-`. + checkpoint_step = 56 # int | Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. (optional) + checkpoint = 'checkpoint_example' # str | Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. (optional) + output = 'output_example' # str | Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. 
(optional) + + try: + # Download model + api_response = await api_instance.finetune_download_get(ft_id, checkpoint_step=checkpoint_step, checkpoint=checkpoint, output=output) + print("The response of FineTuningApi->finetune_download_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling FineTuningApi->finetune_download_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **ft_id** | **str**| Fine-tune ID to download. A string that starts with `ft-`. | + **checkpoint_step** | **int**| Specifies step number for checkpoint to download. Ignores `checkpoint` value if set. | [optional] + **checkpoint** | **str**| Specifies checkpoint type to download - `merged` vs `adapter`. This field is required if the checkpoint_step is not set. | [optional] + **output** | **str**| Specifies output file name for downloaded model. Defaults to `$PWD/{model_name}.{extension}`. | [optional] + +### Return type + +[**FinetuneDownloadResult**](FinetuneDownloadResult.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Successfully downloaded the fine-tuned model or checkpoint. | - | +**400** | Invalid request parameters. | - | +**404** | Fine-tune ID not found. 
| - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneDownloadResult.md b/src/together/generated/docs/FinetuneDownloadResult.md new file mode 100644 index 00000000..36bce63b --- /dev/null +++ b/src/together/generated/docs/FinetuneDownloadResult.md @@ -0,0 +1,31 @@ +# FinetuneDownloadResult + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | [optional] +**id** | **str** | | [optional] +**checkpoint_step** | **int** | | [optional] +**filename** | **str** | | [optional] +**size** | **int** | | [optional] + +## Example + +```python +from together.generated.models.finetune_download_result import FinetuneDownloadResult + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneDownloadResult from a JSON string +finetune_download_result_instance = FinetuneDownloadResult.from_json(json) +# print the JSON string representation of the object +print(FinetuneDownloadResult.to_json()) + +# convert the object into a dict +finetune_download_result_dict = finetune_download_result_instance.to_dict() +# create an instance of FinetuneDownloadResult from a dict +finetune_download_result_from_dict = FinetuneDownloadResult.from_dict(finetune_download_result_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventLevels.md b/src/together/generated/docs/FinetuneEventLevels.md new file mode 100644 index 00000000..0056898d --- /dev/null +++ b/src/together/generated/docs/FinetuneEventLevels.md @@ -0,0 +1,18 @@ +# FinetuneEventLevels + + +## Enum + +* `INFO` (value: `'info'`) + +* `WARNING` (value: `'warning'`) + +* `ERROR` 
(value: `'error'`) + +* `LEGACY_INFO` (value: `'legacy_info'`) + +* `LEGACY_IWARNING` (value: `'legacy_iwarning'`) + +* `LEGACY_IERROR` (value: `'legacy_ierror'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneEventType.md b/src/together/generated/docs/FinetuneEventType.md new file mode 100644 index 00000000..6f936a60 --- /dev/null +++ b/src/together/generated/docs/FinetuneEventType.md @@ -0,0 +1,56 @@ +# FinetuneEventType + + +## Enum + +* `JOB_PENDING` (value: `'job_pending'`) + +* `JOB_START` (value: `'job_start'`) + +* `JOB_STOPPED` (value: `'job_stopped'`) + +* `MODEL_DOWNLOADING` (value: `'model_downloading'`) + +* `MODEL_DOWNLOAD_COMPLETE` (value: `'model_download_complete'`) + +* `TRAINING_DATA_DOWNLOADING` (value: `'training_data_downloading'`) + +* `TRAINING_DATA_DOWNLOAD_COMPLETE` (value: `'training_data_download_complete'`) + +* `VALIDATION_DATA_DOWNLOADING` (value: `'validation_data_downloading'`) + +* `VALIDATION_DATA_DOWNLOAD_COMPLETE` (value: `'validation_data_download_complete'`) + +* `WANDB_INIT` (value: `'wandb_init'`) + +* `TRAINING_START` (value: `'training_start'`) + +* `CHECKPOINT_SAVE` (value: `'checkpoint_save'`) + +* `BILLING_LIMIT` (value: `'billing_limit'`) + +* `EPOCH_COMPLETE` (value: `'epoch_complete'`) + +* `TRAINING_COMPLETE` (value: `'training_complete'`) + +* `MODEL_COMPRESSING` (value: `'model_compressing'`) + +* `MODEL_COMPRESSION_COMPLETE` (value: `'model_compression_complete'`) + +* `MODEL_UPLOADING` (value: `'model_uploading'`) + +* `MODEL_UPLOAD_COMPLETE` (value: `'model_upload_complete'`) + +* `JOB_COMPLETE` (value: `'job_complete'`) + +* `JOB_ERROR` (value: `'job_error'`) + +* `CANCEL_REQUESTED` (value: `'cancel_requested'`) + +* `JOB_RESTARTED` (value: `'job_restarted'`) + +* `REFUND` (value: `'refund'`) + +* `WARNING` (value: `'warning'`) + +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneJobStatus.md b/src/together/generated/docs/FinetuneJobStatus.md new file mode 100644 index 00000000..750038be --- /dev/null +++ b/src/together/generated/docs/FinetuneJobStatus.md @@ -0,0 +1,24 @@ +# FinetuneJobStatus + + +## Enum + +* `PENDING` (value: `'pending'`) + +* `QUEUED` (value: `'queued'`) + +* `RUNNING` (value: `'running'`) + +* `COMPRESSING` (value: `'compressing'`) + +* `UPLOADING` (value: `'uploading'`) + +* `CANCEL_REQUESTED` (value: `'cancel_requested'`) + +* `CANCELLED` (value: `'cancelled'`) + +* `ERROR` (value: `'error'`) + +* `COMPLETED` (value: `'completed'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneList.md b/src/together/generated/docs/FinetuneList.md new file mode 100644 index 00000000..4785467b --- /dev/null +++ b/src/together/generated/docs/FinetuneList.md @@ -0,0 +1,27 @@ +# FinetuneList + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FinetuneResponse]**](FinetuneResponse.md) | | + +## Example + +```python +from together.generated.models.finetune_list import FinetuneList + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneList from a JSON string +finetune_list_instance = FinetuneList.from_json(json) +# print the JSON string representation of the object +print(FinetuneList.to_json()) + +# convert the object into a dict +finetune_list_dict = finetune_list_instance.to_dict() +# create an instance of FinetuneList from a dict +finetune_list_from_dict = FinetuneList.from_dict(finetune_list_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) 
[[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneListEvents.md b/src/together/generated/docs/FinetuneListEvents.md new file mode 100644 index 00000000..2fa6ed43 --- /dev/null +++ b/src/together/generated/docs/FinetuneListEvents.md @@ -0,0 +1,27 @@ +# FinetuneListEvents + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | + +## Example + +```python +from together.generated.models.finetune_list_events import FinetuneListEvents + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneListEvents from a JSON string +finetune_list_events_instance = FinetuneListEvents.from_json(json) +# print the JSON string representation of the object +print(FinetuneListEvents.to_json()) + +# convert the object into a dict +finetune_list_events_dict = finetune_list_events_instance.to_dict() +# create an instance of FinetuneListEvents from a dict +finetune_list_events_from_dict = FinetuneListEvents.from_dict(finetune_list_events_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponse.md b/src/together/generated/docs/FinetuneResponse.md new file mode 100644 index 00000000..68dc10c6 --- /dev/null +++ b/src/together/generated/docs/FinetuneResponse.md @@ -0,0 +1,58 @@ +# FinetuneResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**training_file** | **str** | | [optional] +**validation_file** | **str** | | [optional] +**model** | **str** | | [optional] +**model_output_name** | **str** | | [optional] +**model_output_path** | **str** | | [optional] +**trainingfile_numlines** | 
**int** | | [optional] +**trainingfile_size** | **int** | | [optional] +**created_at** | **str** | | [optional] +**updated_at** | **str** | | [optional] +**n_epochs** | **int** | | [optional] +**n_checkpoints** | **int** | | [optional] +**n_evals** | **int** | | [optional] +**batch_size** | **int** | | [optional] +**learning_rate** | **float** | | [optional] +**lr_scheduler** | [**LRScheduler**](LRScheduler.md) | | [optional] +**warmup_ratio** | **float** | | [optional] +**max_grad_norm** | **float** | | [optional] +**weight_decay** | **float** | | [optional] +**eval_steps** | **int** | | [optional] +**train_on_inputs** | [**FinetuneResponseTrainOnInputs**](FinetuneResponseTrainOnInputs.md) | | [optional] +**training_type** | [**FineTunesPostRequestTrainingType**](FineTunesPostRequestTrainingType.md) | | [optional] +**status** | [**FinetuneJobStatus**](FinetuneJobStatus.md) | | +**job_id** | **str** | | [optional] +**events** | [**List[FineTuneEvent]**](FineTuneEvent.md) | | [optional] +**token_count** | **int** | | [optional] +**param_count** | **int** | | [optional] +**total_price** | **int** | | [optional] +**epochs_completed** | **int** | | [optional] +**queue_depth** | **int** | | [optional] +**wandb_project_name** | **str** | | [optional] +**wandb_url** | **str** | | [optional] + +## Example + +```python +from together.generated.models.finetune_response import FinetuneResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneResponse from a JSON string +finetune_response_instance = FinetuneResponse.from_json(json) +# print the JSON string representation of the object +print(FinetuneResponse.to_json()) + +# convert the object into a dict +finetune_response_dict = finetune_response_instance.to_dict() +# create an instance of FinetuneResponse from a dict +finetune_response_from_dict = FinetuneResponse.from_dict(finetune_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinetuneResponseTrainOnInputs.md b/src/together/generated/docs/FinetuneResponseTrainOnInputs.md new file mode 100644 index 00000000..0ea8e32c --- /dev/null +++ b/src/together/generated/docs/FinetuneResponseTrainOnInputs.md @@ -0,0 +1,26 @@ +# FinetuneResponseTrainOnInputs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.finetune_response_train_on_inputs import FinetuneResponseTrainOnInputs + +# TODO update the JSON string below +json = "{}" +# create an instance of FinetuneResponseTrainOnInputs from a JSON string +finetune_response_train_on_inputs_instance = FinetuneResponseTrainOnInputs.from_json(json) +# print the JSON string representation of the object +print(FinetuneResponseTrainOnInputs.to_json()) + +# convert the object into a dict +finetune_response_train_on_inputs_dict = finetune_response_train_on_inputs_instance.to_dict() +# create an instance of FinetuneResponseTrainOnInputs from a dict +finetune_response_train_on_inputs_from_dict = FinetuneResponseTrainOnInputs.from_dict(finetune_response_train_on_inputs_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FinishReason.md b/src/together/generated/docs/FinishReason.md new file mode 100644 index 00000000..e6907d83 --- /dev/null +++ b/src/together/generated/docs/FinishReason.md @@ -0,0 +1,16 @@ +# FinishReason + + +## Enum + +* `STOP` (value: `'stop'`) + +* `EOS` (value: `'eos'`) + +* `LENGTH` (value: `'length'`) + +* `TOOL_CALLS` (value: `'tool_calls'`) + +* `FUNCTION_CALL` (value: `'function_call'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/FullTrainingType.md b/src/together/generated/docs/FullTrainingType.md new file mode 100644 index 00000000..4b40ee0f --- /dev/null +++ b/src/together/generated/docs/FullTrainingType.md @@ -0,0 +1,27 @@ +# FullTrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | + +## Example + +```python +from together.generated.models.full_training_type import FullTrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of FullTrainingType from a JSON string +full_training_type_instance = FullTrainingType.from_json(json) +# print the JSON string representation of the object +print(FullTrainingType.to_json()) + +# convert the object into a dict +full_training_type_dict = full_training_type_instance.to_dict() +# create an instance of FullTrainingType from a dict +full_training_type_from_dict = FullTrainingType.from_dict(full_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareApi.md b/src/together/generated/docs/HardwareApi.md new file mode 100644 index 00000000..b631d038 --- /dev/null +++ b/src/together/generated/docs/HardwareApi.md @@ -0,0 +1,88 @@ +# together.generated.HardwareApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**list_hardware**](HardwareApi.md#list_hardware) | **GET** /hardware | List available hardware configurations + + +# **list_hardware** +> ListHardware200Response list_hardware(model=model) + +List available hardware configurations + +Returns a list of available hardware configurations for deploying models. 
When a model parameter is provided, it returns only hardware configurations compatible with that model, including their current availability status. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.list_hardware200_response import ListHardware200Response +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.HardwareApi(api_client) + model = 'meta-llama/Llama-3-70b-chat-hf' # str | Filter hardware configurations by model compatibility (optional) + + try: + # List available hardware configurations + api_response = await api_instance.list_hardware(model=model) + print("The response of HardwareApi->list_hardware:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HardwareApi->list_hardware: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **model** | **str**| Filter hardware configurations by model compatibility | [optional] + +### Return type + +[**ListHardware200Response**](ListHardware200Response.md) + +### 
Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | List of available hardware configurations | - | +**403** | Unauthorized | - | +**500** | Internal error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareAvailability.md b/src/together/generated/docs/HardwareAvailability.md new file mode 100644 index 00000000..6ff309ee --- /dev/null +++ b/src/together/generated/docs/HardwareAvailability.md @@ -0,0 +1,28 @@ +# HardwareAvailability + +Indicates the current availability status of a hardware configuration + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**status** | **str** | The availability status of the hardware configuration | + +## Example + +```python +from together.generated.models.hardware_availability import HardwareAvailability + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareAvailability from a JSON string +hardware_availability_instance = HardwareAvailability.from_json(json) +# print the JSON string representation of the object +print(HardwareAvailability.to_json()) + +# convert the object into a dict +hardware_availability_dict = hardware_availability_instance.to_dict() +# create an instance of HardwareAvailability from a dict +hardware_availability_from_dict = HardwareAvailability.from_dict(hardware_availability_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git 
a/src/together/generated/docs/HardwareSpec.md b/src/together/generated/docs/HardwareSpec.md new file mode 100644 index 00000000..9967c6f2 --- /dev/null +++ b/src/together/generated/docs/HardwareSpec.md @@ -0,0 +1,31 @@ +# HardwareSpec + +Detailed specifications of a hardware configuration + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**gpu_type** | **str** | The type/model of GPU | +**gpu_link** | **str** | The GPU interconnect technology | +**gpu_memory** | **float** | Amount of GPU memory in GB | +**gpu_count** | **int** | Number of GPUs in this configuration | + +## Example + +```python +from together.generated.models.hardware_spec import HardwareSpec + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareSpec from a JSON string +hardware_spec_instance = HardwareSpec.from_json(json) +# print the JSON string representation of the object +print(HardwareSpec.to_json()) + +# convert the object into a dict +hardware_spec_dict = hardware_spec_instance.to_dict() +# create an instance of HardwareSpec from a dict +hardware_spec_from_dict = HardwareSpec.from_dict(hardware_spec_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/HardwareWithStatus.md b/src/together/generated/docs/HardwareWithStatus.md new file mode 100644 index 00000000..6435273e --- /dev/null +++ b/src/together/generated/docs/HardwareWithStatus.md @@ -0,0 +1,33 @@ +# HardwareWithStatus + +Hardware configuration details including current availability status + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | 
[**HardwareSpec**](HardwareSpec.md) | | +**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | [optional] +**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from together.generated.models.hardware_with_status import HardwareWithStatus + +# TODO update the JSON string below +json = "{}" +# create an instance of HardwareWithStatus from a JSON string +hardware_with_status_instance = HardwareWithStatus.from_json(json) +# print the JSON string representation of the object +print(HardwareWithStatus.to_json()) + +# convert the object into a dict +hardware_with_status_dict = hardware_with_status_instance.to_dict() +# create an instance of HardwareWithStatus from a dict +hardware_with_status_from_dict = HardwareWithStatus.from_dict(hardware_with_status_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponse.md b/src/together/generated/docs/ImageResponse.md new file mode 100644 index 00000000..eca8ec0c --- /dev/null +++ b/src/together/generated/docs/ImageResponse.md @@ -0,0 +1,30 @@ +# ImageResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**model** | **str** | | +**object** | **str** | | +**data** | [**List[ImageResponseDataInner]**](ImageResponseDataInner.md) | | + +## Example + +```python +from together.generated.models.image_response import ImageResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of ImageResponse from a JSON string +image_response_instance = ImageResponse.from_json(json) +# print the JSON string representation of the object +print(ImageResponse.to_json()) + +# convert the object into a dict +image_response_dict = image_response_instance.to_dict() +# create an instance 
of ImageResponse from a dict +image_response_from_dict = ImageResponse.from_dict(image_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImageResponseDataInner.md b/src/together/generated/docs/ImageResponseDataInner.md new file mode 100644 index 00000000..f529b63c --- /dev/null +++ b/src/together/generated/docs/ImageResponseDataInner.md @@ -0,0 +1,29 @@ +# ImageResponseDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**b64_json** | **str** | | [optional] +**url** | **str** | | [optional] + +## Example + +```python +from together.generated.models.image_response_data_inner import ImageResponseDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ImageResponseDataInner from a JSON string +image_response_data_inner_instance = ImageResponseDataInner.from_json(json) +# print the JSON string representation of the object +print(ImageResponseDataInner.to_json()) + +# convert the object into a dict +image_response_data_inner_dict = image_response_data_inner_instance.to_dict() +# create an instance of ImageResponseDataInner from a dict +image_response_data_inner_from_dict = ImageResponseDataInner.from_dict(image_response_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesApi.md b/src/together/generated/docs/ImagesApi.md new file mode 100644 index 00000000..d7db520e --- /dev/null +++ b/src/together/generated/docs/ImagesApi.md @@ -0,0 +1,87 @@ +# together.generated.ImagesApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | 
------------- | ------------- +[**images_generations_post**](ImagesApi.md#images_generations_post) | **POST** /images/generations | Create image + + +# **images_generations_post** +> ImageResponse images_generations_post(images_generations_post_request) + +Create image + +Use an image model to generate an image for a given prompt. + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.image_response import ImageResponse +from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ImagesApi(api_client) + images_generations_post_request = together.generated.ImagesGenerationsPostRequest() # ImagesGenerationsPostRequest | + + try: + # Create image + api_response = await api_instance.images_generations_post(images_generations_post_request) + print("The response of ImagesApi->images_generations_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ImagesApi->images_generations_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **images_generations_post_request** | [**ImagesGenerationsPostRequest**](ImagesGenerationsPostRequest.md)| | + +### Return type + +[**ImageResponse**](ImageResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | Image generated successfully | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequest.md b/src/together/generated/docs/ImagesGenerationsPostRequest.md new file mode 100644 index 00000000..4263429b --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequest.md @@ -0,0 +1,39 @@ +# ImagesGenerationsPostRequest + + +## Properties + +Name | Type | Description | Notes 
+------------ | ------------- | ------------- | ------------- +**prompt** | **str** | A description of the desired images. Maximum length varies by model. | +**model** | [**ImagesGenerationsPostRequestModel**](ImagesGenerationsPostRequestModel.md) | | +**steps** | **int** | Number of generation steps. | [optional] [default to 20] +**image_url** | **str** | URL of an image to use for image models that support it. | [optional] +**seed** | **int** | Seed used for generation. Can be used to reproduce image generations. | [optional] +**n** | **int** | Number of image results to generate. | [optional] [default to 1] +**height** | **int** | Height of the image to generate in number of pixels. | [optional] [default to 1024] +**width** | **int** | Width of the image to generate in number of pixels. | [optional] [default to 1024] +**negative_prompt** | **str** | The prompt or prompts not to guide the image generation. | [optional] +**response_format** | **str** | Format of the image response. Can be either a base64 string or a URL. | [optional] +**guidance** | **float** | Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom. | [optional] [default to 3.5] +**output_format** | **str** | The format of the image response. Can be either be `jpeg` or `png`. Defaults to `jpeg`. | [optional] [default to 'jpeg'] +**image_loras** | [**List[ImagesGenerationsPostRequestImageLorasInner]**](ImagesGenerationsPostRequestImageLorasInner.md) | An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image. 
| [optional] + +## Example + +```python +from together.generated.models.images_generations_post_request import ImagesGenerationsPostRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequest from a JSON string +images_generations_post_request_instance = ImagesGenerationsPostRequest.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequest.to_json()) + +# convert the object into a dict +images_generations_post_request_dict = images_generations_post_request_instance.to_dict() +# create an instance of ImagesGenerationsPostRequest from a dict +images_generations_post_request_from_dict = ImagesGenerationsPostRequest.from_dict(images_generations_post_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md b/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md new file mode 100644 index 00000000..6bb54e5e --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequestImageLorasInner.md @@ -0,0 +1,28 @@ +# ImagesGenerationsPostRequestImageLorasInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**path** | **str** | The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA). | +**scale** | **float** | The strength of the LoRA's influence. Most LoRA's recommend a value of 1. 
| + +## Example + +```python +from together.generated.models.images_generations_post_request_image_loras_inner import ImagesGenerationsPostRequestImageLorasInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequestImageLorasInner from a JSON string +images_generations_post_request_image_loras_inner_instance = ImagesGenerationsPostRequestImageLorasInner.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequestImageLorasInner.to_json()) + +# convert the object into a dict +images_generations_post_request_image_loras_inner_dict = images_generations_post_request_image_loras_inner_instance.to_dict() +# create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict +images_generations_post_request_image_loras_inner_from_dict = ImagesGenerationsPostRequestImageLorasInner.from_dict(images_generations_post_request_image_loras_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ImagesGenerationsPostRequestModel.md b/src/together/generated/docs/ImagesGenerationsPostRequestModel.md new file mode 100644 index 00000000..333020df --- /dev/null +++ b/src/together/generated/docs/ImagesGenerationsPostRequestModel.md @@ -0,0 +1,27 @@ +# ImagesGenerationsPostRequestModel + +The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.images_generations_post_request_model import ImagesGenerationsPostRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of ImagesGenerationsPostRequestModel from a JSON string +images_generations_post_request_model_instance = ImagesGenerationsPostRequestModel.from_json(json) +# print the JSON string representation of the object +print(ImagesGenerationsPostRequestModel.to_json()) + +# convert the object into a dict +images_generations_post_request_model_dict = images_generations_post_request_model_instance.to_dict() +# create an instance of ImagesGenerationsPostRequestModel from a dict +images_generations_post_request_model_from_dict = ImagesGenerationsPostRequestModel.from_dict(images_generations_post_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LRScheduler.md b/src/together/generated/docs/LRScheduler.md new file mode 100644 index 00000000..6580bafd --- /dev/null +++ b/src/together/generated/docs/LRScheduler.md @@ -0,0 +1,28 @@ +# LRScheduler + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**lr_scheduler_type** | **str** | | +**lr_scheduler_args** | [**LinearLRSchedulerArgs**](LinearLRSchedulerArgs.md) | | [optional] + +## Example + +```python +from together.generated.models.lr_scheduler import LRScheduler + +# TODO update the JSON string below +json = "{}" +# create an instance of LRScheduler from a JSON string +lr_scheduler_instance = LRScheduler.from_json(json) +# print the JSON string representation of the object 
+print(LRScheduler.to_json()) + +# convert the object into a dict +lr_scheduler_dict = lr_scheduler_instance.to_dict() +# create an instance of LRScheduler from a dict +lr_scheduler_from_dict = LRScheduler.from_dict(lr_scheduler_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LinearLRSchedulerArgs.md b/src/together/generated/docs/LinearLRSchedulerArgs.md new file mode 100644 index 00000000..82240c87 --- /dev/null +++ b/src/together/generated/docs/LinearLRSchedulerArgs.md @@ -0,0 +1,27 @@ +# LinearLRSchedulerArgs + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**min_lr_ratio** | **float** | The ratio of the final learning rate to the peak learning rate | [optional] [default to 0.0] + +## Example + +```python +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs + +# TODO update the JSON string below +json = "{}" +# create an instance of LinearLRSchedulerArgs from a JSON string +linear_lr_scheduler_args_instance = LinearLRSchedulerArgs.from_json(json) +# print the JSON string representation of the object +print(LinearLRSchedulerArgs.to_json()) + +# convert the object into a dict +linear_lr_scheduler_args_dict = linear_lr_scheduler_args_instance.to_dict() +# create an instance of LinearLRSchedulerArgs from a dict +linear_lr_scheduler_args_from_dict = LinearLRSchedulerArgs.from_dict(linear_lr_scheduler_args_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoint.md b/src/together/generated/docs/ListEndpoint.md new file mode 100644 index 00000000..c7a226ea --- /dev/null +++ b/src/together/generated/docs/ListEndpoint.md @@ -0,0 +1,35 @@ 
+# ListEndpoint + +Details about an endpoint when listed via the list endpoint + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | The type of object | +**id** | **str** | Unique identifier for the endpoint | +**name** | **str** | System name for the endpoint | +**model** | **str** | The model deployed on this endpoint | +**type** | **str** | The type of endpoint | +**owner** | **str** | The owner of this endpoint | +**state** | **str** | Current state of the endpoint | +**created_at** | **datetime** | Timestamp when the endpoint was created | + +## Example + +```python +from together.generated.models.list_endpoint import ListEndpoint + +# TODO update the JSON string below +json = "{}" +# create an instance of ListEndpoint from a JSON string +list_endpoint_instance = ListEndpoint.from_json(json) +# print the JSON string representation of the object +print(ListEndpoint.to_json()) + +# convert the object into a dict +list_endpoint_dict = list_endpoint_instance.to_dict() +# create an instance of ListEndpoint from a dict +list_endpoint_from_dict = ListEndpoint.from_dict(list_endpoint_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListEndpoints200Response.md b/src/together/generated/docs/ListEndpoints200Response.md new file mode 100644 index 00000000..16babfb5 --- /dev/null +++ b/src/together/generated/docs/ListEndpoints200Response.md @@ -0,0 +1,28 @@ +# ListEndpoints200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListEndpoint]**](ListEndpoint.md) | | + +## Example + +```python +from together.generated.models.list_endpoints200_response import ListEndpoints200Response + +# TODO update the JSON 
string below +json = "{}" +# create an instance of ListEndpoints200Response from a JSON string +list_endpoints200_response_instance = ListEndpoints200Response.from_json(json) +# print the JSON string representation of the object +print(ListEndpoints200Response.to_json()) + +# convert the object into a dict +list_endpoints200_response_dict = list_endpoints200_response_instance.to_dict() +# create an instance of ListEndpoints200Response from a dict +list_endpoints200_response_from_dict = ListEndpoints200Response.from_dict(list_endpoints200_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200Response.md b/src/together/generated/docs/ListHardware200Response.md new file mode 100644 index 00000000..7621d170 --- /dev/null +++ b/src/together/generated/docs/ListHardware200Response.md @@ -0,0 +1,28 @@ +# ListHardware200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response import ListHardware200Response + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200Response from a JSON string +list_hardware200_response_instance = ListHardware200Response.from_json(json) +# print the JSON string representation of the object +print(ListHardware200Response.to_json()) + +# convert the object into a dict +list_hardware200_response_dict = list_hardware200_response_instance.to_dict() +# create an instance of ListHardware200Response from a dict +list_hardware200_response_from_dict = ListHardware200Response.from_dict(list_hardware200_response_dict) +``` +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf.md b/src/together/generated/docs/ListHardware200ResponseOneOf.md new file mode 100644 index 00000000..4c4472d7 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf.md @@ -0,0 +1,29 @@ +# ListHardware200ResponseOneOf + +Response when no model filter is provided + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOfDataInner]**](ListHardware200ResponseOneOfDataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of import ListHardware200ResponseOneOf + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf from a JSON string +list_hardware200_response_one_of_instance = ListHardware200ResponseOneOf.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of_dict = list_hardware200_response_one_of_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf from a dict +list_hardware200_response_one_of_from_dict = ListHardware200ResponseOneOf.from_dict(list_hardware200_response_one_of_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1.md b/src/together/generated/docs/ListHardware200ResponseOneOf1.md new file mode 100644 index 00000000..e93eeca5 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf1.md @@ -0,0 +1,29 @@ +# ListHardware200ResponseOneOf1 + 
+Response when model filter is provided + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**data** | [**List[ListHardware200ResponseOneOf1DataInner]**](ListHardware200ResponseOneOf1DataInner.md) | | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of1 import ListHardware200ResponseOneOf1 + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf1 from a JSON string +list_hardware200_response_one_of1_instance = ListHardware200ResponseOneOf1.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf1.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of1_dict = list_hardware200_response_one_of1_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf1 from a dict +list_hardware200_response_one_of1_from_dict = ListHardware200ResponseOneOf1.from_dict(list_hardware200_response_one_of1_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md new file mode 100644 index 00000000..ffe4d491 --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOf1DataInner.md @@ -0,0 +1,32 @@ +# ListHardware200ResponseOneOf1DataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | [**HardwareSpec**](HardwareSpec.md) | | +**availability** | [**HardwareAvailability**](HardwareAvailability.md) | | 
+**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from together.generated.models.list_hardware200_response_one_of1_data_inner import ListHardware200ResponseOneOf1DataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string +list_hardware200_response_one_of1_data_inner_instance = ListHardware200ResponseOneOf1DataInner.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOf1DataInner.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of1_data_inner_dict = list_hardware200_response_one_of1_data_inner_instance.to_dict() +# create an instance of ListHardware200ResponseOneOf1DataInner from a dict +list_hardware200_response_one_of1_data_inner_from_dict = ListHardware200ResponseOneOf1DataInner.from_dict(list_hardware200_response_one_of1_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md b/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md new file mode 100644 index 00000000..75586f0d --- /dev/null +++ b/src/together/generated/docs/ListHardware200ResponseOneOfDataInner.md @@ -0,0 +1,32 @@ +# ListHardware200ResponseOneOfDataInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | | +**name** | **str** | Unique identifier for the hardware configuration | +**pricing** | [**EndpointPricing**](EndpointPricing.md) | | +**specs** | [**HardwareSpec**](HardwareSpec.md) | | +**availability** | **object** | | [optional] +**updated_at** | **datetime** | Timestamp of when the hardware status was last updated | + +## Example + +```python +from 
together.generated.models.list_hardware200_response_one_of_data_inner import ListHardware200ResponseOneOfDataInner + +# TODO update the JSON string below +json = "{}" +# create an instance of ListHardware200ResponseOneOfDataInner from a JSON string +list_hardware200_response_one_of_data_inner_instance = ListHardware200ResponseOneOfDataInner.from_json(json) +# print the JSON string representation of the object +print(ListHardware200ResponseOneOfDataInner.to_json()) + +# convert the object into a dict +list_hardware200_response_one_of_data_inner_dict = list_hardware200_response_one_of_data_inner_instance.to_dict() +# create an instance of ListHardware200ResponseOneOfDataInner from a dict +list_hardware200_response_one_of_data_inner_from_dict = ListHardware200ResponseOneOfDataInner.from_dict(list_hardware200_response_one_of_data_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LoRATrainingType.md b/src/together/generated/docs/LoRATrainingType.md new file mode 100644 index 00000000..e977d18b --- /dev/null +++ b/src/together/generated/docs/LoRATrainingType.md @@ -0,0 +1,31 @@ +# LoRATrainingType + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | +**lora_r** | **int** | | +**lora_alpha** | **int** | | +**lora_dropout** | **float** | | [optional] [default to 0.0] +**lora_trainable_modules** | **str** | | [optional] [default to 'all-linear'] + +## Example + +```python +from together.generated.models.lo_ra_training_type import LoRATrainingType + +# TODO update the JSON string below +json = "{}" +# create an instance of LoRATrainingType from a JSON string +lo_ra_training_type_instance = LoRATrainingType.from_json(json) +# print the JSON string representation of the object +print(LoRATrainingType.to_json()) + +# 
convert the object into a dict +lo_ra_training_type_dict = lo_ra_training_type_instance.to_dict() +# create an instance of LoRATrainingType from a dict +lo_ra_training_type_from_dict = LoRATrainingType.from_dict(lo_ra_training_type_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/LogprobsPart.md b/src/together/generated/docs/LogprobsPart.md new file mode 100644 index 00000000..d489ad95 --- /dev/null +++ b/src/together/generated/docs/LogprobsPart.md @@ -0,0 +1,29 @@ +# LogprobsPart + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**token_ids** | **List[float]** | List of token IDs corresponding to the logprobs | [optional] +**tokens** | **List[str]** | List of token strings | [optional] +**token_logprobs** | **List[float]** | List of token log probabilities | [optional] + +## Example + +```python +from together.generated.models.logprobs_part import LogprobsPart + +# TODO update the JSON string below +json = "{}" +# create an instance of LogprobsPart from a JSON string +logprobs_part_instance = LogprobsPart.from_json(json) +# print the JSON string representation of the object +print(LogprobsPart.to_json()) + +# convert the object into a dict +logprobs_part_dict = logprobs_part_instance.to_dict() +# create an instance of LogprobsPart from a dict +logprobs_part_from_dict = LogprobsPart.from_dict(logprobs_part_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelInfo.md b/src/together/generated/docs/ModelInfo.md new file mode 100644 index 00000000..06bac6fe --- /dev/null +++ b/src/together/generated/docs/ModelInfo.md @@ -0,0 +1,36 @@ +# ModelInfo + + +## Properties + +Name | 
Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **str** | | +**object** | **str** | | +**created** | **int** | | +**type** | **str** | | +**display_name** | **str** | | [optional] +**organization** | **str** | | [optional] +**link** | **str** | | [optional] +**license** | **str** | | [optional] +**context_length** | **int** | | [optional] +**pricing** | [**Pricing**](Pricing.md) | | [optional] + +## Example + +```python +from together.generated.models.model_info import ModelInfo + +# TODO update the JSON string below +json = "{}" +# create an instance of ModelInfo from a JSON string +model_info_instance = ModelInfo.from_json(json) +# print the JSON string representation of the object +print(ModelInfo.to_json()) + +# convert the object into a dict +model_info_dict = model_info_instance.to_dict() +# create an instance of ModelInfo from a dict +model_info_from_dict = ModelInfo.from_dict(model_info_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ModelsApi.md b/src/together/generated/docs/ModelsApi.md new file mode 100644 index 00000000..d5584a59 --- /dev/null +++ b/src/together/generated/docs/ModelsApi.md @@ -0,0 +1,87 @@ +# together.generated.ModelsApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**models**](ModelsApi.md#models) | **GET** /models | List all models + + +# **models** +> List[ModelInfo] models() + +List all models + +Lists all of Together's open-source models + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.model_info import ModelInfo +from together.generated.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to 
https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.ModelsApi(api_client) + + try: + # List all models + api_response = await api_instance.models() + print("The response of ModelsApi->models:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling ModelsApi->models: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. 
+ +### Return type + +[**List[ModelInfo]**](ModelInfo.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/Pricing.md b/src/together/generated/docs/Pricing.md new file mode 100644 index 00000000..24a5d7b8 --- /dev/null +++ b/src/together/generated/docs/Pricing.md @@ -0,0 +1,31 @@ +# Pricing + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**hourly** | **float** | | +**input** | **float** | | +**output** | **float** | | +**base** | **float** | | +**finetune** | **float** | | + +## Example + +```python +from together.generated.models.pricing import Pricing + +# TODO update the JSON string below +json = "{}" +# create an instance of Pricing from a JSON string +pricing_instance = Pricing.from_json(json) +# print the JSON string representation of the object +print(Pricing.to_json()) + +# convert the object into a dict +pricing_dict = pricing_instance.to_dict() +# create an instance of Pricing from a dict +pricing_from_dict = Pricing.from_dict(pricing_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/PromptPartInner.md b/src/together/generated/docs/PromptPartInner.md new file mode 100644 index 00000000..e1270712 --- /dev/null +++ 
b/src/together/generated/docs/PromptPartInner.md @@ -0,0 +1,28 @@ +# PromptPartInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] +**logprobs** | [**LogprobsPart**](LogprobsPart.md) | | [optional] + +## Example + +```python +from together.generated.models.prompt_part_inner import PromptPartInner + +# TODO update the JSON string below +json = "{}" +# create an instance of PromptPartInner from a JSON string +prompt_part_inner_instance = PromptPartInner.from_json(json) +# print the JSON string representation of the object +print(PromptPartInner.to_json()) + +# convert the object into a dict +prompt_part_inner_dict = prompt_part_inner_instance.to_dict() +# create an instance of PromptPartInner from a dict +prompt_part_inner_from_dict = PromptPartInner.from_dict(prompt_part_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankApi.md b/src/together/generated/docs/RerankApi.md new file mode 100644 index 00000000..2990c1f9 --- /dev/null +++ b/src/together/generated/docs/RerankApi.md @@ -0,0 +1,93 @@ +# together.generated.RerankApi + +All URIs are relative to *https://api.together.xyz/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**rerank**](RerankApi.md#rerank) | **POST** /rerank | Create a rerank request + + +# **rerank** +> RerankResponse rerank(rerank_request=rerank_request) + +Create a rerank request + +Query a reranker model + +### Example + +* Bearer Authentication (bearerAuth): + +```python +import together.generated +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_response import RerankResponse +from together.generated.rest import ApiException +from pprint import pprint + +# Defining 
the host is optional and defaults to https://api.together.xyz/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = together.generated.Configuration( + host = "https://api.together.xyz/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure Bearer authorization: bearerAuth +configuration = together.generated.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +async with together.generated.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = together.generated.RerankApi(api_client) + rerank_request = together.generated.RerankRequest() # RerankRequest | (optional) + + try: + # Create a rerank request + api_response = await api_instance.rerank(rerank_request=rerank_request) + print("The response of RerankApi->rerank:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling RerankApi->rerank: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **rerank_request** | [**RerankRequest**](RerankRequest.md)| | [optional] + +### Return type + +[**RerankResponse**](RerankResponse.md) + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | 200 | - | +**400** | BadRequest | - | +**401** | Unauthorized | - | +**404** | NotFound | - | +**429** | RateLimit | - | +**503** | Overloaded | - | +**504** | Timeout | - | + +[[Back to top]](#) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequest.md b/src/together/generated/docs/RerankRequest.md new file mode 100644 index 00000000..4c7ce08a --- /dev/null +++ b/src/together/generated/docs/RerankRequest.md @@ -0,0 +1,32 @@ +# RerankRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**model** | [**RerankRequestModel**](RerankRequestModel.md) | | +**query** | **str** | The search query to be used for ranking. | +**documents** | [**RerankRequestDocuments**](RerankRequestDocuments.md) | | +**top_n** | **int** | The number of top results to return. | [optional] +**return_documents** | **bool** | Whether to return supplied documents with the response. | [optional] +**rank_fields** | **List[str]** | List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking. 
| [optional] + +## Example + +```python +from together.generated.models.rerank_request import RerankRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankRequest from a JSON string +rerank_request_instance = RerankRequest.from_json(json) +# print the JSON string representation of the object +print(RerankRequest.to_json()) + +# convert the object into a dict +rerank_request_dict = rerank_request_instance.to_dict() +# create an instance of RerankRequest from a dict +rerank_request_from_dict = RerankRequest.from_dict(rerank_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestDocuments.md b/src/together/generated/docs/RerankRequestDocuments.md new file mode 100644 index 00000000..51411574 --- /dev/null +++ b/src/together/generated/docs/RerankRequestDocuments.md @@ -0,0 +1,27 @@ +# RerankRequestDocuments + +List of documents, which can be either strings or objects. 
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+
+## Example
+
+```python
+from together.generated.models.rerank_request_documents import RerankRequestDocuments
+
+# TODO update the JSON string below
+json = "{}"
+# create an instance of RerankRequestDocuments from a JSON string
+rerank_request_documents_instance = RerankRequestDocuments.from_json(json)
+# print the JSON string representation of the object
+print(rerank_request_documents_instance.to_json())
+
+# convert the object into a dict
+rerank_request_documents_dict = rerank_request_documents_instance.to_dict()
+# create an instance of RerankRequestDocuments from a dict
+rerank_request_documents_from_dict = RerankRequestDocuments.from_dict(rerank_request_documents_dict)
+```
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankRequestModel.md b/src/together/generated/docs/RerankRequestModel.md new file mode 100644 index 00000000..8d99f7bd --- /dev/null +++ b/src/together/generated/docs/RerankRequestModel.md @@ -0,0 +1,27 @@ +# RerankRequestModel + +The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```python +from together.generated.models.rerank_request_model import RerankRequestModel + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankRequestModel from a JSON string +rerank_request_model_instance = RerankRequestModel.from_json(json) +# print the JSON string representation of the object +print(rerank_request_model_instance.to_json()) + +# convert the object into a dict +rerank_request_model_dict = rerank_request_model_instance.to_dict() +# create an instance of RerankRequestModel from a dict +rerank_request_model_from_dict = RerankRequestModel.from_dict(rerank_request_model_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponse.md b/src/together/generated/docs/RerankResponse.md new file mode 100644 index 00000000..a40aa152 --- /dev/null +++ b/src/together/generated/docs/RerankResponse.md @@ -0,0 +1,31 @@ +# RerankResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**object** | **str** | Object type | +**id** | **str** | Request ID | [optional] +**model** | **str** | The model to be used for the rerank request. 
| +**results** | [**List[RerankResponseResultsInner]**](RerankResponseResultsInner.md) | | +**usage** | [**UsageData**](UsageData.md) | | [optional] + +## Example + +```python +from together.generated.models.rerank_response import RerankResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponse from a JSON string +rerank_response_instance = RerankResponse.from_json(json) +# print the JSON string representation of the object +print(RerankResponse.to_json()) + +# convert the object into a dict +rerank_response_dict = rerank_response_instance.to_dict() +# create an instance of RerankResponse from a dict +rerank_response_from_dict = RerankResponse.from_dict(rerank_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInner.md b/src/together/generated/docs/RerankResponseResultsInner.md new file mode 100644 index 00000000..0f245895 --- /dev/null +++ b/src/together/generated/docs/RerankResponseResultsInner.md @@ -0,0 +1,29 @@ +# RerankResponseResultsInner + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **int** | | +**relevance_score** | **float** | | +**document** | [**RerankResponseResultsInnerDocument**](RerankResponseResultsInnerDocument.md) | | + +## Example + +```python +from together.generated.models.rerank_response_results_inner import RerankResponseResultsInner + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponseResultsInner from a JSON string +rerank_response_results_inner_instance = RerankResponseResultsInner.from_json(json) +# print the JSON string representation of the object +print(RerankResponseResultsInner.to_json()) + +# convert the object into a dict +rerank_response_results_inner_dict = 
rerank_response_results_inner_instance.to_dict() +# create an instance of RerankResponseResultsInner from a dict +rerank_response_results_inner_from_dict = RerankResponseResultsInner.from_dict(rerank_response_results_inner_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/RerankResponseResultsInnerDocument.md b/src/together/generated/docs/RerankResponseResultsInnerDocument.md new file mode 100644 index 00000000..75ea6439 --- /dev/null +++ b/src/together/generated/docs/RerankResponseResultsInnerDocument.md @@ -0,0 +1,27 @@ +# RerankResponseResultsInnerDocument + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **str** | | [optional] + +## Example + +```python +from together.generated.models.rerank_response_results_inner_document import RerankResponseResultsInnerDocument + +# TODO update the JSON string below +json = "{}" +# create an instance of RerankResponseResultsInnerDocument from a JSON string +rerank_response_results_inner_document_instance = RerankResponseResultsInnerDocument.from_json(json) +# print the JSON string representation of the object +print(RerankResponseResultsInnerDocument.to_json()) + +# convert the object into a dict +rerank_response_results_inner_document_dict = rerank_response_results_inner_document_instance.to_dict() +# create an instance of RerankResponseResultsInnerDocument from a dict +rerank_response_results_inner_document_from_dict = RerankResponseResultsInnerDocument.from_dict(rerank_response_results_inner_document_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/StreamSentinel.md b/src/together/generated/docs/StreamSentinel.md new 
file mode 100644 index 00000000..aeb5f6f4 --- /dev/null +++ b/src/together/generated/docs/StreamSentinel.md @@ -0,0 +1,27 @@ +# StreamSentinel + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**data** | **str** | | + +## Example + +```python +from together.generated.models.stream_sentinel import StreamSentinel + +# TODO update the JSON string below +json = "{}" +# create an instance of StreamSentinel from a JSON string +stream_sentinel_instance = StreamSentinel.from_json(json) +# print the JSON string representation of the object +print(StreamSentinel.to_json()) + +# convert the object into a dict +stream_sentinel_dict = stream_sentinel_instance.to_dict() +# create an instance of StreamSentinel from a dict +stream_sentinel_from_dict = StreamSentinel.from_dict(stream_sentinel_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoice.md b/src/together/generated/docs/ToolChoice.md new file mode 100644 index 00000000..e0d6a775 --- /dev/null +++ b/src/together/generated/docs/ToolChoice.md @@ -0,0 +1,30 @@ +# ToolChoice + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**index** | **float** | | +**id** | **str** | | +**type** | **str** | | +**function** | [**ToolChoiceFunction**](ToolChoiceFunction.md) | | + +## Example + +```python +from together.generated.models.tool_choice import ToolChoice + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolChoice from a JSON string +tool_choice_instance = ToolChoice.from_json(json) +# print the JSON string representation of the object +print(ToolChoice.to_json()) + +# convert the object into a dict +tool_choice_dict = tool_choice_instance.to_dict() +# create an instance of ToolChoice from a dict 
+tool_choice_from_dict = ToolChoice.from_dict(tool_choice_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolChoiceFunction.md b/src/together/generated/docs/ToolChoiceFunction.md new file mode 100644 index 00000000..a740c34e --- /dev/null +++ b/src/together/generated/docs/ToolChoiceFunction.md @@ -0,0 +1,28 @@ +# ToolChoiceFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | +**arguments** | **str** | | + +## Example + +```python +from together.generated.models.tool_choice_function import ToolChoiceFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolChoiceFunction from a JSON string +tool_choice_function_instance = ToolChoiceFunction.from_json(json) +# print the JSON string representation of the object +print(ToolChoiceFunction.to_json()) + +# convert the object into a dict +tool_choice_function_dict = tool_choice_function_instance.to_dict() +# create an instance of ToolChoiceFunction from a dict +tool_choice_function_from_dict = ToolChoiceFunction.from_dict(tool_choice_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPart.md b/src/together/generated/docs/ToolsPart.md new file mode 100644 index 00000000..733e311e --- /dev/null +++ b/src/together/generated/docs/ToolsPart.md @@ -0,0 +1,28 @@ +# ToolsPart + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**type** | **str** | | [optional] +**function** | [**ToolsPartFunction**](ToolsPartFunction.md) | | [optional] + +## Example + +```python +from 
together.generated.models.tools_part import ToolsPart + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolsPart from a JSON string +tools_part_instance = ToolsPart.from_json(json) +# print the JSON string representation of the object +print(ToolsPart.to_json()) + +# convert the object into a dict +tools_part_dict = tools_part_instance.to_dict() +# create an instance of ToolsPart from a dict +tools_part_from_dict = ToolsPart.from_dict(tools_part_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/ToolsPartFunction.md b/src/together/generated/docs/ToolsPartFunction.md new file mode 100644 index 00000000..27d59e43 --- /dev/null +++ b/src/together/generated/docs/ToolsPartFunction.md @@ -0,0 +1,29 @@ +# ToolsPartFunction + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**description** | **str** | | [optional] +**name** | **str** | | [optional] +**parameters** | **Dict[str, object]** | A map of parameter names to their values. 
| [optional] + +## Example + +```python +from together.generated.models.tools_part_function import ToolsPartFunction + +# TODO update the JSON string below +json = "{}" +# create an instance of ToolsPartFunction from a JSON string +tools_part_function_instance = ToolsPartFunction.from_json(json) +# print the JSON string representation of the object +print(ToolsPartFunction.to_json()) + +# convert the object into a dict +tools_part_function_dict = tools_part_function_instance.to_dict() +# create an instance of ToolsPartFunction from a dict +tools_part_function_from_dict = ToolsPartFunction.from_dict(tools_part_function_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UpdateEndpointRequest.md b/src/together/generated/docs/UpdateEndpointRequest.md new file mode 100644 index 00000000..76fb7a8e --- /dev/null +++ b/src/together/generated/docs/UpdateEndpointRequest.md @@ -0,0 +1,29 @@ +# UpdateEndpointRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**display_name** | **str** | A human-readable name for the endpoint | [optional] +**state** | **str** | The desired state of the endpoint | [optional] +**autoscaling** | [**Autoscaling**](Autoscaling.md) | New autoscaling configuration for the endpoint | [optional] + +## Example + +```python +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of UpdateEndpointRequest from a JSON string +update_endpoint_request_instance = UpdateEndpointRequest.from_json(json) +# print the JSON string representation of the object +print(UpdateEndpointRequest.to_json()) + +# convert the object into a dict +update_endpoint_request_dict = update_endpoint_request_instance.to_dict() +# create an instance 
of UpdateEndpointRequest from a dict +update_endpoint_request_from_dict = UpdateEndpointRequest.from_dict(update_endpoint_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/docs/UsageData.md b/src/together/generated/docs/UsageData.md new file mode 100644 index 00000000..0a0f4692 --- /dev/null +++ b/src/together/generated/docs/UsageData.md @@ -0,0 +1,29 @@ +# UsageData + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**prompt_tokens** | **int** | | +**completion_tokens** | **int** | | +**total_tokens** | **int** | | + +## Example + +```python +from together.generated.models.usage_data import UsageData + +# TODO update the JSON string below +json = "{}" +# create an instance of UsageData from a JSON string +usage_data_instance = UsageData.from_json(json) +# print the JSON string representation of the object +print(UsageData.to_json()) + +# convert the object into a dict +usage_data_dict = usage_data_instance.to_dict() +# create an instance of UsageData from a dict +usage_data_from_dict = UsageData.from_dict(usage_data_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/src/together/generated/exceptions.py b/src/together/generated/exceptions.py new file mode 100644 index 00000000..ade3cc31 --- /dev/null +++ b/src/together/generated/exceptions.py @@ -0,0 +1,220 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + +from typing import Any, Optional +from typing_extensions import Self + + +class OpenApiException(Exception): + """The base exception class for all OpenAPIExceptions""" + + +class ApiTypeError(OpenApiException, TypeError): + def __init__( + self, msg, path_to_item=None, valid_classes=None, key_type=None + ) -> None: + """Raises an exception for TypeErrors + + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (list): a list of keys an indices to get to the + current_item + None if unset + valid_classes (tuple): the primitive classes that current item + should be an instance of + None if unset + key_type (bool): False if our value is a value in a dict + True if it is a key in a dict + False if our item is an item in a list + None if unset + """ + self.path_to_item = path_to_item + self.valid_classes = valid_classes + self.key_type = key_type + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiTypeError, self).__init__(full_msg) + + +class ApiValueError(OpenApiException, ValueError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (list) the path to the exception in the + received_data dict. None if unset + """ + + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiValueError, self).__init__(full_msg) + + +class ApiAttributeError(OpenApiException, AttributeError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Raised when an attribute reference or assignment fails. 
+ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (None/list) the path to the exception in the + received_data dict + """ + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiAttributeError, self).__init__(full_msg) + + +class ApiKeyError(OpenApiException, KeyError): + def __init__(self, msg, path_to_item=None) -> None: + """ + Args: + msg (str): the exception message + + Keyword Args: + path_to_item (None/list) the path to the exception in the + received_data dict + """ + self.path_to_item = path_to_item + full_msg = msg + if path_to_item: + full_msg = "{0} at {1}".format(msg, render_path(path_to_item)) + super(ApiKeyError, self).__init__(full_msg) + + +class ApiException(OpenApiException): + + def __init__( + self, + status=None, + reason=None, + http_resp=None, + *, + body: Optional[str] = None, + data: Optional[Any] = None, + ) -> None: + self.status = status + self.reason = reason + self.body = body + self.data = data + self.headers = None + + if http_resp: + if self.status is None: + self.status = http_resp.status + if self.reason is None: + self.reason = http_resp.reason + if self.body is None: + try: + self.body = http_resp.data.decode("utf-8") + except Exception: + pass + self.headers = http_resp.getheaders() + + @classmethod + def from_response( + cls, + *, + http_resp, + body: Optional[str], + data: Optional[Any], + ) -> Self: + if http_resp.status == 400: + raise BadRequestException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 401: + raise UnauthorizedException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 403: + raise ForbiddenException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 404: + raise NotFoundException(http_resp=http_resp, body=body, data=data) + + # Added new conditions for 409 and 422 + if http_resp.status == 409: + raise 
ConflictException(http_resp=http_resp, body=body, data=data) + + if http_resp.status == 422: + raise UnprocessableEntityException( + http_resp=http_resp, body=body, data=data + ) + + if 500 <= http_resp.status <= 599: + raise ServiceException(http_resp=http_resp, body=body, data=data) + raise ApiException(http_resp=http_resp, body=body, data=data) + + def __str__(self): + """Custom error messages for exception""" + error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason) + if self.headers: + error_message += "HTTP response headers: {0}\n".format(self.headers) + + if self.data or self.body: + error_message += "HTTP response body: {0}\n".format(self.data or self.body) + + return error_message + + +class BadRequestException(ApiException): + pass + + +class NotFoundException(ApiException): + pass + + +class UnauthorizedException(ApiException): + pass + + +class ForbiddenException(ApiException): + pass + + +class ServiceException(ApiException): + pass + + +class ConflictException(ApiException): + """Exception for HTTP 409 Conflict.""" + + pass + + +class UnprocessableEntityException(ApiException): + """Exception for HTTP 422 Unprocessable Entity.""" + + pass + + +def render_path(path_to_item): + """Returns a string representation of a path""" + result = "" + for pth in path_to_item: + if isinstance(pth, int): + result += "[{0}]".format(pth) + else: + result += "['{0}']".format(pth) + return result diff --git a/src/together/generated/models/__init__.py b/src/together/generated/models/__init__.py new file mode 100644 index 00000000..7b50c345 --- /dev/null +++ b/src/together/generated/models/__init__.py @@ -0,0 +1,197 @@ +# coding: utf-8 + +# flake8: noqa +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +# import models into model package +from together.generated.models.audio_speech_request import AudioSpeechRequest +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) +from together.generated.models.autoscaling import Autoscaling +from together.generated.models.chat_completion_assistant_message_param import ( + ChatCompletionAssistantMessageParam, +) +from together.generated.models.chat_completion_choice import ChatCompletionChoice +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from 
together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) +from together.generated.models.chat_completion_request import ChatCompletionRequest +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) +from together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) +from together.generated.models.chat_completion_response import ChatCompletionResponse +from together.generated.models.chat_completion_stream import ChatCompletionStream +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) +from together.generated.models.chat_completion_token import ChatCompletionToken +from together.generated.models.chat_completion_tool import ChatCompletionTool +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.completion_chunk import CompletionChunk +from together.generated.models.completion_chunk_usage import 
CompletionChunkUsage +from together.generated.models.completion_event import CompletionEvent +from together.generated.models.completion_request import CompletionRequest +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from together.generated.models.completion_response import CompletionResponse +from together.generated.models.completion_stream import CompletionStream +from together.generated.models.completion_token import CompletionToken +from together.generated.models.create_endpoint_request import CreateEndpointRequest +from together.generated.models.dedicated_endpoint import DedicatedEndpoint +from together.generated.models.embeddings_request import EmbeddingsRequest +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from together.generated.models.embeddings_response import EmbeddingsResponse +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from together.generated.models.endpoint_pricing import EndpointPricing +from together.generated.models.error_data import ErrorData +from together.generated.models.error_data_error import ErrorDataError +from together.generated.models.file_delete_response import FileDeleteResponse +from together.generated.models.file_list import FileList +from together.generated.models.file_object import FileObject +from together.generated.models.file_response import FileResponse +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type 
import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_download_result import FinetuneDownloadResult +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_list import FinetuneList +from together.generated.models.finetune_list_events import FinetuneListEvents +from together.generated.models.finetune_response import FinetuneResponse +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from together.generated.models.finish_reason import FinishReason +from together.generated.models.full_training_type import FullTrainingType +from together.generated.models.hardware_availability import HardwareAvailability +from together.generated.models.hardware_spec import HardwareSpec +from together.generated.models.hardware_with_status import HardwareWithStatus +from together.generated.models.image_response import ImageResponse +from together.generated.models.image_response_data_inner import ImageResponseDataInner +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from together.generated.models.lr_scheduler import LRScheduler +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs +from together.generated.models.list_endpoint import ListEndpoint +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) +from together.generated.models.list_hardware200_response import ListHardware200Response 
+from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from together.generated.models.lo_ra_training_type import LoRATrainingType +from together.generated.models.logprobs_part import LogprobsPart +from together.generated.models.model_info import ModelInfo +from together.generated.models.pricing import Pricing +from together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.rerank_request import RerankRequest +from together.generated.models.rerank_request_documents import RerankRequestDocuments +from together.generated.models.rerank_request_model import RerankRequestModel +from together.generated.models.rerank_response import RerankResponse +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from together.generated.models.stream_sentinel import StreamSentinel +from together.generated.models.tool_choice import ToolChoice +from together.generated.models.tool_choice_function import ToolChoiceFunction +from together.generated.models.tools_part import ToolsPart +from together.generated.models.tools_part_function import ToolsPartFunction +from together.generated.models.update_endpoint_request import UpdateEndpointRequest +from together.generated.models.usage_data import UsageData diff --git a/src/together/generated/models/audio_speech_request.py b/src/together/generated/models/audio_speech_request.py new file mode 100644 index 
00000000..af1a11ef --- /dev/null +++ b/src/together/generated/models/audio_speech_request.py @@ -0,0 +1,212 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechRequest(BaseModel): + """ + AudioSpeechRequest + """ # noqa: E501 + + model: AudioSpeechRequestModel + input: StrictStr = Field(description="Input text to generate the audio for") + voice: AudioSpeechRequestVoice + response_format: Optional[StrictStr] = Field( + default="wav", description="The format of audio output" + ) + language: Optional[StrictStr] = Field( + default="en", description="Language of input text" + ) + response_encoding: Optional[StrictStr] = Field( + default="pcm_f32le", description="Audio encoding of response" + ) + sample_rate: Optional[Union[StrictFloat, StrictInt]] = Field( + default=44100, description="Sampling rate to use for the output audio" + ) + stream: Optional[StrictBool] = Field( + default=False, + description="If true, output is streamed for several characters at a time instead of waiting for the full response. The stream terminates with `data: [DONE]`. 
If false, return the encoded audio as octet stream", + ) + __properties: ClassVar[List[str]] = [ + "model", + "input", + "voice", + "response_format", + "language", + "response_encoding", + "sample_rate", + "stream", + ] + + @field_validator("response_format") + def response_format_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["mp3", "wav", "raw"]): + raise ValueError("must be one of enum values ('mp3', 'wav', 'raw')") + return value + + @field_validator("language") + def language_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set( + [ + "en", + "de", + "fr", + "es", + "hi", + "it", + "ja", + "ko", + "nl", + "pl", + "pt", + "ru", + "sv", + "tr", + "zh", + ] + ): + raise ValueError( + "must be one of enum values ('en', 'de', 'fr', 'es', 'hi', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ru', 'sv', 'tr', 'zh')" + ) + return value + + @field_validator("response_encoding") + def response_encoding_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["pcm_f32le", "pcm_s16le", "pcm_mulaw", "pcm_alaw"]): + raise ValueError( + "must be one of enum values ('pcm_f32le', 'pcm_s16le', 'pcm_mulaw', 'pcm_alaw')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def 
to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of voice + if self.voice: + _dict["voice"] = self.voice.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "model": ( + AudioSpeechRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "input": obj.get("input"), + "voice": ( + AudioSpeechRequestVoice.from_dict(obj["voice"]) + if obj.get("voice") is not None + else None + ), + "response_format": ( + obj.get("response_format") + if obj.get("response_format") is not None + else "wav" + ), + "language": ( + obj.get("language") if obj.get("language") is not None else "en" + ), + "response_encoding": ( + obj.get("response_encoding") + if obj.get("response_encoding") is not None + else "pcm_f32le" + ), + "sample_rate": ( + obj.get("sample_rate") + if obj.get("sample_rate") is not None + else 44100 + ), + "stream": obj.get("stream") if obj.get("stream") is not None else False, + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_request_model.py b/src/together/generated/models/audio_speech_request_model.py new file mode 
100644 index 00000000..4ab613b5 --- /dev/null +++ b/src/together/generated/models/audio_speech_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +AUDIOSPEECHREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class AudioSpeechRequestModel(BaseModel): + """ + The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = AudioSpeechRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in AudioSpeechRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_request_voice.py b/src/together/generated/models/audio_speech_request_voice.py new file mode 100644 index 00000000..81c1f689 --- /dev/null +++ b/src/together/generated/models/audio_speech_request_voice.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +AUDIOSPEECHREQUESTVOICE_ANY_OF_SCHEMAS = ["str"] + + +class AudioSpeechRequestVoice(BaseModel): + """ + The voice to use for generating the audio. [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available). + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = AudioSpeechRequestVoice.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in AudioSpeechRequestVoice with anyOf schemas: str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechRequestVoice with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/audio_speech_stream_chunk.py b/src/together/generated/models/audio_speech_stream_chunk.py new file mode 100644 index 00000000..27627dd2 --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_chunk.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechStreamChunk(BaseModel): + """ + AudioSpeechStreamChunk + """ # noqa: E501 + + object: StrictStr + model: StrictStr + b64: StrictStr = Field(description="base64 encoded audio stream") + __properties: ClassVar[List[str]] = ["object", "model", "b64"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["audio.tts.chunk"]): + raise ValueError("must be one of enum values ('audio.tts.chunk')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechStreamChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechStreamChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "model": obj.get("model"), + "b64": obj.get("b64"), + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_stream_event.py b/src/together/generated/models/audio_speech_stream_event.py new file mode 100644 index 00000000..9c11b923 --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_event.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk +from typing import Optional, Set +from typing_extensions import Self + + +class AudioSpeechStreamEvent(BaseModel): + """ + AudioSpeechStreamEvent + """ # noqa: E501 + + data: AudioSpeechStreamChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of AudioSpeechStreamEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of AudioSpeechStreamEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + AudioSpeechStreamChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/audio_speech_stream_response.py b/src/together/generated/models/audio_speech_stream_response.py new file mode 100644 index 00000000..b573857d --- /dev/null +++ b/src/together/generated/models/audio_speech_stream_response.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent +from together.generated.models.stream_sentinel import StreamSentinel +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +AUDIOSPEECHSTREAMRESPONSE_ONE_OF_SCHEMAS = ["AudioSpeechStreamEvent", "StreamSentinel"] + + +class AudioSpeechStreamResponse(BaseModel): + """ + AudioSpeechStreamResponse + """ + + # data type: AudioSpeechStreamEvent + oneof_schema_1_validator: Optional[AudioSpeechStreamEvent] = None + # data type: StreamSentinel + oneof_schema_2_validator: Optional[StreamSentinel] = None + actual_instance: Optional[Union[AudioSpeechStreamEvent, StreamSentinel]] = None + one_of_schemas: Set[str] = {"AudioSpeechStreamEvent", "StreamSentinel"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = AudioSpeechStreamResponse.model_construct() + error_messages = [] + match = 0 + # validate data type: AudioSpeechStreamEvent + if not isinstance(v, AudioSpeechStreamEvent): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `AudioSpeechStreamEvent`" + ) + else: + match += 1 + # validate data type: StreamSentinel + if not isinstance(v, StreamSentinel): + error_messages.append( + f"Error! Input type `{type(v)}` is not `StreamSentinel`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into AudioSpeechStreamEvent + try: + instance.actual_instance = AudioSpeechStreamEvent.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into StreamSentinel + try: + instance.actual_instance = StreamSentinel.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into AudioSpeechStreamResponse with oneOf schemas: AudioSpeechStreamEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], AudioSpeechStreamEvent, StreamSentinel]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/autoscaling.py b/src/together/generated/models/autoscaling.py new file mode 100644 index 00000000..fb79d4f5 --- /dev/null +++ b/src/together/generated/models/autoscaling.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class Autoscaling(BaseModel): + """ + Configuration for automatic scaling of replicas based on demand. 
+ """ # noqa: E501 + + min_replicas: StrictInt = Field( + description="The minimum number of replicas to maintain, even when there is no load" + ) + max_replicas: StrictInt = Field( + description="The maximum number of replicas to scale up to under load" + ) + __properties: ClassVar[List[str]] = ["min_replicas", "max_replicas"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of Autoscaling from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of Autoscaling from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "min_replicas": obj.get("min_replicas"), + "max_replicas": obj.get("max_replicas"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_assistant_message_param.py b/src/together/generated/models/chat_completion_assistant_message_param.py new file mode 100644 index 00000000..dbb10cfe --- /dev/null +++ b/src/together/generated/models/chat_completion_assistant_message_param.py @@ -0,0 +1,130 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from together.generated.models.tool_choice import ToolChoice +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionAssistantMessageParam(BaseModel): + """ + ChatCompletionAssistantMessageParam + """ # noqa: E501 + + content: Optional[StrictStr] = None + role: StrictStr + name: Optional[StrictStr] = None + tool_calls: Optional[List[ToolChoice]] = None + function_call: Optional[ChatCompletionMessageFunctionCall] = None + __properties: ClassVar[List[str]] = [ + "content", + "role", + "name", + "tool_calls", + "function_call", + ] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["assistant"]): + raise ValueError("must be one of enum values ('assistant')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionAssistantMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) + _items = [] + if self.tool_calls: + for _item_tool_calls in self.tool_calls: + if _item_tool_calls: + _items.append(_item_tool_calls.to_dict()) + _dict["tool_calls"] = _items + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionAssistantMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + "tool_calls": ( + [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] + if obj.get("tool_calls") is not None + else None + ), + "function_call": ( + ChatCompletionMessageFunctionCall.from_dict(obj["function_call"]) + if obj.get("function_call") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice.py b/src/together/generated/models/chat_completion_choice.py new file mode 100644 index 00000000..3cd51127 --- /dev/null +++ b/src/together/generated/models/chat_completion_choice.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) +from together.generated.models.finish_reason import FinishReason +from together.generated.models.logprobs_part import LogprobsPart +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoice(BaseModel): + """ + ChatCompletionChoice + """ # noqa: E501 + + index: StrictInt + finish_reason: FinishReason + logprobs: Optional[LogprobsPart] = None + delta: ChatCompletionChoiceDelta + __properties: ClassVar[List[str]] = ["index", "finish_reason", "logprobs", "delta"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. 
Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + # override the default output from pydantic by calling `to_dict()` of delta + if self.delta: + _dict["delta"] = self.delta.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "finish_reason": obj.get("finish_reason"), + "logprobs": ( + LogprobsPart.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + "delta": ( + ChatCompletionChoiceDelta.from_dict(obj["delta"]) + if obj.get("delta") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta.py b/src/together/generated/models/chat_completion_choice_delta.py new file mode 100644 index 00000000..be8bde6b --- /dev/null +++ b/src/together/generated/models/chat_completion_choice_delta.py @@ -0,0 +1,134 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) +from together.generated.models.tool_choice import ToolChoice +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoiceDelta(BaseModel): + """ + ChatCompletionChoiceDelta + """ # noqa: E501 + + token_id: Optional[StrictInt] = None + role: StrictStr + content: Optional[StrictStr] = None + tool_calls: Optional[List[ToolChoice]] = None + function_call: Optional[ChatCompletionChoiceDeltaFunctionCall] = None + __properties: ClassVar[List[str]] = [ + "token_id", + "role", + "content", + "tool_calls", + "function_call", + ] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system", "user", "assistant", "function", "tool"]): + raise ValueError( + "must be one of enum values ('system', 'user', 'assistant', 'function', 'tool')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDelta from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation 
of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) + _items = [] + if self.tool_calls: + for _item_tool_calls in self.tool_calls: + if _item_tool_calls: + _items.append(_item_tool_calls.to_dict()) + _dict["tool_calls"] = _items + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDelta from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "token_id": obj.get("token_id"), + "role": obj.get("role"), + "content": obj.get("content"), + "tool_calls": ( + [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] + if obj.get("tool_calls") is not None + else None + ), + "function_call": ( + ChatCompletionChoiceDeltaFunctionCall.from_dict( + obj["function_call"] + ) + if obj.get("function_call") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choice_delta_function_call.py b/src/together/generated/models/chat_completion_choice_delta_function_call.py new file mode 100644 index 00000000..4d1c4079 --- /dev/null +++ b/src/together/generated/models/chat_completion_choice_delta_function_call.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoiceDeltaFunctionCall(BaseModel): + """ + ChatCompletionChoiceDeltaFunctionCall + """ # noqa: E501 + + arguments: StrictStr + name: StrictStr + __properties: ClassVar[List[str]] = ["arguments", "name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoiceDeltaFunctionCall from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"arguments": obj.get("arguments"), "name": obj.get("name")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner.py b/src/together/generated/models/chat_completion_choices_data_inner.py new file mode 100644 index 00000000..042cd5f3 --- /dev/null +++ b/src/together/generated/models/chat_completion_choices_data_inner.py @@ -0,0 +1,123 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) +from together.generated.models.chat_completion_message import ChatCompletionMessage +from together.generated.models.finish_reason import FinishReason +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoicesDataInner(BaseModel): + """ + ChatCompletionChoicesDataInner + """ # noqa: E501 + + text: Optional[StrictStr] = None + index: Optional[StrictInt] = None + seed: Optional[StrictInt] = None + finish_reason: Optional[FinishReason] = None + message: Optional[ChatCompletionMessage] = None + logprobs: Optional[ChatCompletionChoicesDataInnerLogprobs] = None + __properties: ClassVar[List[str]] = [ + "text", + "index", + "seed", + "finish_reason", + "message", + "logprobs", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of message + if self.message: + _dict["message"] = self.message.to_dict() + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "text": obj.get("text"), + "index": obj.get("index"), + "seed": obj.get("seed"), + "finish_reason": obj.get("finish_reason"), + "message": ( + ChatCompletionMessage.from_dict(obj["message"]) + if obj.get("message") is not None + else None + ), + "logprobs": ( + ChatCompletionChoicesDataInnerLogprobs.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py b/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py new file mode 100644 index 00000000..608b6c10 --- /dev/null +++ b/src/together/generated/models/chat_completion_choices_data_inner_logprobs.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChoicesDataInnerLogprobs(BaseModel): + """ + ChatCompletionChoicesDataInnerLogprobs + """ # noqa: E501 + + token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token IDs corresponding to the logprobs" + ) + tokens: Optional[List[StrictStr]] = Field( + default=None, description="List of token strings" + ) + token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field( + default=None, description="List of token log probabilities" + ) + __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChoicesDataInnerLogprobs from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "token_ids": obj.get("token_ids"), + "tokens": obj.get("tokens"), + "token_logprobs": obj.get("token_logprobs"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_chunk.py b/src/together/generated/models/chat_completion_chunk.py new file mode 100644 index 00000000..5d1dd9de --- /dev/null +++ b/src/together/generated/models/chat_completion_chunk.py @@ -0,0 +1,139 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionChunk(BaseModel): + """ + ChatCompletionChunk + """ # noqa: E501 + + id: StrictStr + object: StrictStr + created: StrictInt + system_fingerprint: Optional[StrictStr] = None + model: StrictStr + choices: List[ChatCompletionChunkChoicesInner] + usage: Optional[CompletionChunkUsage] = None + __properties: ClassVar[List[str]] = [ + "id", + "object", + "created", + "system_fingerprint", + "model", + "choices", + "usage", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["chat.completion.chunk"]): + raise ValueError("must be one of enum values ('chat.completion.chunk')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "object": obj.get("object"), + "created": obj.get("created"), + "system_fingerprint": obj.get("system_fingerprint"), + "model": obj.get("model"), + "choices": ( + [ + ChatCompletionChunkChoicesInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "usage": ( + CompletionChunkUsage.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_chunk_choices_inner.py b/src/together/generated/models/chat_completion_chunk_choices_inner.py new file mode 100644 index 00000000..fd5a9ef9 --- /dev/null +++ b/src/together/generated/models/chat_completion_chunk_choices_inner.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
class ChatCompletionChunkChoicesInner(BaseModel):
    """
    A single element of a streamed chunk's `choices` array: the incremental
    `delta` content plus per-choice metadata.

    Auto-generated from the Together OpenAPI spec; do not edit manually.
    """  # noqa: E501

    index: StrictInt
    finish_reason: FinishReason
    # NOTE(review): typed here as a bare number, unlike the structured
    # logprobs object on non-streaming choices — mirrors the generated spec;
    # confirm the intended shape against the API docs.
    logprobs: Optional[Union[StrictFloat, StrictInt]] = None
    seed: Optional[StrictInt] = None
    # The incremental message content for this chunk.
    delta: ChatCompletionChoiceDelta
    # Wire-format property names recognized by this model, in spec order.
    __properties: ClassVar[List[str]] = [
        "index",
        "finish_reason",
        "logprobs",
        "seed",
        "delta",
    ]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ChatCompletionChunkChoicesInner from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # override the default output from pydantic by calling `to_dict()` of delta
        if self.delta:
            _dict["delta"] = self.delta.to_dict()
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ChatCompletionChunkChoicesInner from a dict"""
        if obj is None:
            return None

        # Anything that is not a plain dict is handed straight to pydantic validation.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate(
            {
                "index": obj.get("index"),
                "finish_reason": obj.get("finish_reason"),
                "logprobs": obj.get("logprobs"),
                "seed": obj.get("seed"),
                "delta": (
                    ChatCompletionChoiceDelta.from_dict(obj["delta"])
                    if obj.get("delta") is not None
                    else None
                ),
            }
        )
        return _obj
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.chat_completion_chunk import ChatCompletionChunk +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionEvent(BaseModel): + """ + ChatCompletionEvent + """ # noqa: E501 + + data: ChatCompletionChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + ChatCompletionChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_function_message_param.py b/src/together/generated/models/chat_completion_function_message_param.py new file mode 100644 index 00000000..b6679430 --- /dev/null +++ b/src/together/generated/models/chat_completion_function_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionFunctionMessageParam(BaseModel): + """ + ChatCompletionFunctionMessageParam + """ # noqa: E501 + + role: StrictStr + content: StrictStr + name: StrictStr + __properties: ClassVar[List[str]] = ["role", "content", "name"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["function"]): + raise ValueError("must be one of enum values ('function')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionFunctionMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionFunctionMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "role": obj.get("role"), + "content": obj.get("content"), + "name": obj.get("name"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_message.py b/src/together/generated/models/chat_completion_message.py new file mode 100644 index 00000000..08eeca7a --- /dev/null +++ b/src/together/generated/models/chat_completion_message.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) +from together.generated.models.tool_choice import ToolChoice +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionMessage(BaseModel): + """ + ChatCompletionMessage + """ # noqa: E501 + + content: StrictStr + role: StrictStr + tool_calls: Optional[List[ToolChoice]] = None + function_call: Optional[ChatCompletionMessageFunctionCall] = None + __properties: ClassVar[List[str]] = [ + "content", + "role", + "tool_calls", + "function_call", + ] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["assistant"]): + raise ValueError("must be one of enum values ('assistant')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionMessage from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in tool_calls (list) + _items = [] + if self.tool_calls: + for _item_tool_calls in self.tool_calls: + if _item_tool_calls: + _items.append(_item_tool_calls.to_dict()) + _dict["tool_calls"] = _items + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionMessage from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "tool_calls": ( + [ToolChoice.from_dict(_item) for _item in obj["tool_calls"]] + if obj.get("tool_calls") is not None + else None + ), + "function_call": ( + ChatCompletionMessageFunctionCall.from_dict(obj["function_call"]) + if obj.get("function_call") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_message_function_call.py b/src/together/generated/models/chat_completion_message_function_call.py new file mode 100644 index 00000000..6deaeb4a --- /dev/null +++ b/src/together/generated/models/chat_completion_message_function_call.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
class ChatCompletionMessageFunctionCall(BaseModel):
    """A function call attached to an assistant message: the function `name`
    and its `arguments` string."""  # noqa: E501

    arguments: StrictStr
    name: StrictStr
    __properties: ClassVar[List[str]] = ["arguments", "name"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed string form of the model, using alias field names."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Serialize the model to a JSON string, using alias field names."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an instance from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Dict form of the model: alias names, `None`-valued fields dropped
        (only nullable fields explicitly set at init survive as `None`)."""
        skip: Set[str] = set()
        return self.model_dump(
            by_alias=True,
            exclude=skip,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an instance from a plain dict; non-dict input is validated as-is."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        payload = {"arguments": obj.get("arguments"), "name": obj.get("name")}
        return cls.model_validate(payload)
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.chat_completion_assistant_message_param import ( + ChatCompletionAssistantMessageParam, +) +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONMESSAGEPARAM_ONE_OF_SCHEMAS = [ + "ChatCompletionAssistantMessageParam", + "ChatCompletionFunctionMessageParam", + "ChatCompletionSystemMessageParam", + "ChatCompletionToolMessageParam", + "ChatCompletionUserMessageParam", +] + + +class ChatCompletionMessageParam(BaseModel): + """ + ChatCompletionMessageParam + """ + + # data type: ChatCompletionSystemMessageParam + oneof_schema_1_validator: Optional[ChatCompletionSystemMessageParam] = None + # data type: ChatCompletionUserMessageParam + oneof_schema_2_validator: Optional[ChatCompletionUserMessageParam] = None + # data type: ChatCompletionAssistantMessageParam + oneof_schema_3_validator: Optional[ChatCompletionAssistantMessageParam] = None + # data type: ChatCompletionToolMessageParam + oneof_schema_4_validator: Optional[ChatCompletionToolMessageParam] = None + # data type: ChatCompletionFunctionMessageParam + oneof_schema_5_validator: Optional[ChatCompletionFunctionMessageParam] = None + actual_instance: Optional[ + Union[ + 
ChatCompletionAssistantMessageParam, + ChatCompletionFunctionMessageParam, + ChatCompletionSystemMessageParam, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, + ] + ] = None + one_of_schemas: Set[str] = { + "ChatCompletionAssistantMessageParam", + "ChatCompletionFunctionMessageParam", + "ChatCompletionSystemMessageParam", + "ChatCompletionToolMessageParam", + "ChatCompletionUserMessageParam", + } + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionMessageParam.model_construct() + error_messages = [] + match = 0 + # validate data type: ChatCompletionSystemMessageParam + if not isinstance(v, ChatCompletionSystemMessageParam): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ChatCompletionSystemMessageParam`" + ) + else: + match += 1 + # validate data type: ChatCompletionUserMessageParam + if not isinstance(v, ChatCompletionUserMessageParam): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ChatCompletionUserMessageParam`" + ) + else: + match += 1 + # validate data type: ChatCompletionAssistantMessageParam + if not isinstance(v, ChatCompletionAssistantMessageParam): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ChatCompletionAssistantMessageParam`" + ) + else: + match += 1 + # validate data type: ChatCompletionToolMessageParam + if not isinstance(v, ChatCompletionToolMessageParam): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `ChatCompletionToolMessageParam`" + ) + else: + match += 1 + # validate data type: ChatCompletionFunctionMessageParam + if not isinstance(v, ChatCompletionFunctionMessageParam): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ChatCompletionFunctionMessageParam`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into ChatCompletionSystemMessageParam + try: + instance.actual_instance = ChatCompletionSystemMessageParam.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ChatCompletionUserMessageParam + try: + instance.actual_instance = ChatCompletionUserMessageParam.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ChatCompletionAssistantMessageParam + try: + instance.actual_instance = ChatCompletionAssistantMessageParam.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ChatCompletionToolMessageParam + try: + instance.actual_instance = ChatCompletionToolMessageParam.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ChatCompletionFunctionMessageParam + try: + instance.actual_instance = ChatCompletionFunctionMessageParam.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. 
Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionMessageParam with oneOf schemas: ChatCompletionAssistantMessageParam, ChatCompletionFunctionMessageParam, ChatCompletionSystemMessageParam, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam. Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[ + Union[ + Dict[str, Any], + ChatCompletionAssistantMessageParam, + ChatCompletionFunctionMessageParam, + ChatCompletionSystemMessageParam, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, + ] + ]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request.py b/src/together/generated/models/chat_completion_request.py new file mode 100644 index 00000000..c779b6c5 --- /dev/null +++ b/src/together/generated/models/chat_completion_request.py @@ -0,0 +1,304 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing_extensions import Annotated +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) +from together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) +from together.generated.models.tools_part import ToolsPart +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequest(BaseModel): + """ + ChatCompletionRequest + """ # noqa: E501 + + messages: List[ChatCompletionRequestMessagesInner] = Field( + description="A list of messages comprising the conversation so far." + ) + model: ChatCompletionRequestModel + max_tokens: Optional[StrictInt] = Field( + default=None, description="The maximum number of tokens to generate." + ) + stop: Optional[List[StrictStr]] = Field( + default=None, + description='A list of string sequences that will truncate (stop) inference text output. 
For example, "" will stop generation as soon as the model generates the given token.', + ) + temperature: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.", + ) + top_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. This technique helps maintain diversity and generate more fluent and natural-sounding text.", + ) + top_k: Optional[StrictInt] = Field( + default=None, + description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.", + ) + context_length_exceeded_behavior: Optional[StrictStr] = Field( + default="error", + description="Defined the behavior of the API when max_tokens exceed the maximum context length of the model. When set to 'error', API will return 400 with appropriate error message. When set to 'truncate', override the max_tokens with maximum context length of the model.", + ) + repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. 
Higher values decrease repetition.", + ) + stream: Optional[StrictBool] = Field( + default=None, + description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results.", + ) + logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field( + default=None, + description="Determines the number of most likely tokens to return at each token position log probabilities to return.", + ) + echo: Optional[StrictBool] = Field( + default=None, + description="If true, the response will contain the prompt. Can be used with `logprobs` to return prompt logprobs.", + ) + n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field( + default=None, + description="The number of completions to generate for each prompt.", + ) + min_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between 0 and 1 that can be used as an alternative to top_p and top-k.", + ) + presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.", + ) + frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.", + ) + logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field( + default=None, + description="Adjusts the likelihood of specific tokens appearing in the generated output.", + ) + seed: Optional[StrictInt] = Field( + default=None, description="Seed value for reproducibility." 
+ ) + function_call: Optional[ChatCompletionRequestFunctionCall] = None + response_format: Optional[ChatCompletionRequestResponseFormat] = None + tools: Optional[List[ToolsPart]] = Field( + default=None, + description="A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.", + ) + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + safety_model: Optional[StrictStr] = Field( + default=None, + description="The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models).", + ) + __properties: ClassVar[List[str]] = [ + "messages", + "model", + "max_tokens", + "stop", + "temperature", + "top_p", + "top_k", + "context_length_exceeded_behavior", + "repetition_penalty", + "stream", + "logprobs", + "echo", + "n", + "min_p", + "presence_penalty", + "frequency_penalty", + "logit_bias", + "seed", + "function_call", + "response_format", + "tools", + "tool_choice", + "safety_model", + ] + + @field_validator("context_length_exceeded_behavior") + def context_length_exceeded_behavior_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["truncate", "error"]): + raise ValueError("must be one of enum values ('truncate', 'error')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> 
Optional[Self]: + """Create an instance of ChatCompletionRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in messages (list) + _items = [] + if self.messages: + for _item_messages in self.messages: + if _item_messages: + _items.append(_item_messages.to_dict()) + _dict["messages"] = _items + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of function_call + if self.function_call: + _dict["function_call"] = self.function_call.to_dict() + # override the default output from pydantic by calling `to_dict()` of response_format + if self.response_format: + _dict["response_format"] = self.response_format.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in tools (list) + _items = [] + if self.tools: + for _item_tools in self.tools: + if _item_tools: + _items.append(_item_tools.to_dict()) + _dict["tools"] = _items + # override the default output from pydantic by calling `to_dict()` of tool_choice + if self.tool_choice: + _dict["tool_choice"] = self.tool_choice.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequest from a dict""" + if obj is None: + return None + + if not 
isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "messages": ( + [ + ChatCompletionRequestMessagesInner.from_dict(_item) + for _item in obj["messages"] + ] + if obj.get("messages") is not None + else None + ), + "model": ( + ChatCompletionRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "max_tokens": obj.get("max_tokens"), + "stop": obj.get("stop"), + "temperature": obj.get("temperature"), + "top_p": obj.get("top_p"), + "top_k": obj.get("top_k"), + "context_length_exceeded_behavior": ( + obj.get("context_length_exceeded_behavior") + if obj.get("context_length_exceeded_behavior") is not None + else "error" + ), + "repetition_penalty": obj.get("repetition_penalty"), + "stream": obj.get("stream"), + "logprobs": obj.get("logprobs"), + "echo": obj.get("echo"), + "n": obj.get("n"), + "min_p": obj.get("min_p"), + "presence_penalty": obj.get("presence_penalty"), + "frequency_penalty": obj.get("frequency_penalty"), + "logit_bias": obj.get("logit_bias"), + "seed": obj.get("seed"), + "function_call": ( + ChatCompletionRequestFunctionCall.from_dict(obj["function_call"]) + if obj.get("function_call") is not None + else None + ), + "response_format": ( + ChatCompletionRequestResponseFormat.from_dict( + obj["response_format"] + ) + if obj.get("response_format") is not None + else None + ), + "tools": ( + [ToolsPart.from_dict(_item) for _item in obj["tools"]] + if obj.get("tools") is not None + else None + ), + "tool_choice": ( + ChatCompletionRequestToolChoice.from_dict(obj["tool_choice"]) + if obj.get("tool_choice") is not None + else None + ), + "safety_model": obj.get("safety_model"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_request_function_call.py b/src/together/generated/models/chat_completion_request_function_call.py new file mode 100644 index 00000000..cb8159fa --- /dev/null +++ 
b/src/together/generated/models/chat_completion_request_function_call.py @@ -0,0 +1,177 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONREQUESTFUNCTIONCALL_ONE_OF_SCHEMAS = [ + "ChatCompletionRequestFunctionCallOneOf", + "str", +] + + +class ChatCompletionRequestFunctionCall(BaseModel): + """ + ChatCompletionRequestFunctionCall + """ + + # data type: str + oneof_schema_1_validator: Optional[StrictStr] = None + # data type: ChatCompletionRequestFunctionCallOneOf + oneof_schema_2_validator: Optional[ChatCompletionRequestFunctionCallOneOf] = None + actual_instance: Optional[Union[ChatCompletionRequestFunctionCallOneOf, str]] = None + one_of_schemas: Set[str] = {"ChatCompletionRequestFunctionCallOneOf", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionRequestFunctionCall.model_construct() + error_messages = [] + match = 0 + # validate data type: str + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: ChatCompletionRequestFunctionCallOneOf + if not isinstance(v, ChatCompletionRequestFunctionCallOneOf): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ChatCompletionRequestFunctionCallOneOf`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into str + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ChatCompletionRequestFunctionCallOneOf + try: + instance.actual_instance = ChatCompletionRequestFunctionCallOneOf.from_json( + json_str + ) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionRequestFunctionCall with oneOf schemas: ChatCompletionRequestFunctionCallOneOf, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], ChatCompletionRequestFunctionCallOneOf, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request_function_call_one_of.py b/src/together/generated/models/chat_completion_request_function_call_one_of.py new file mode 100644 index 00000000..1dc7940e --- /dev/null +++ b/src/together/generated/models/chat_completion_request_function_call_one_of.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestFunctionCallOneOf(BaseModel): + """ + ChatCompletionRequestFunctionCallOneOf + """ # noqa: E501 + + name: StrictStr + __properties: ClassVar[List[str]] = ["name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestFunctionCallOneOf from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestFunctionCallOneOf from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name")}) + return _obj diff --git a/src/together/generated/models/chat_completion_request_messages_inner.py b/src/together/generated/models/chat_completion_request_messages_inner.py new file mode 100644 index 00000000..2c9802b9 --- /dev/null +++ b/src/together/generated/models/chat_completion_request_messages_inner.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestMessagesInner(BaseModel): + """ + ChatCompletionRequestMessagesInner + """ # noqa: E501 + + role: StrictStr = Field( + description="The role of the messages author. Choice between: system, user, or assistant." + ) + content: StrictStr = Field( + description="The content of the message, which can either be a simple string or a structured format." 
+ ) + __properties: ClassVar[List[str]] = ["role", "content"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system", "user", "assistant", "tool"]): + raise ValueError( + "must be one of enum values ('system', 'user', 'assistant', 'tool')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestMessagesInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestMessagesInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"role": obj.get("role"), "content": obj.get("content")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_request_model.py b/src/together/generated/models/chat_completion_request_model.py new file mode 100644 index 00000000..f5cd61a8 --- /dev/null +++ b/src/together/generated/models/chat_completion_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +CHATCOMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class ChatCompletionRequestModel(BaseModel): + """ + The name of the model to query.

[See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = ChatCompletionRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in ChatCompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_request_response_format.py b/src/together/generated/models/chat_completion_request_response_format.py new file mode 100644 index 00000000..69ab762f --- /dev/null +++ b/src/together/generated/models/chat_completion_request_response_format.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionRequestResponseFormat(BaseModel): + """ + An object specifying the format that the model must output. 
+ """ # noqa: E501 + + type: Optional[StrictStr] = Field( + default=None, description="The type of the response format." + ) + var_schema: Optional[Dict[str, StrictStr]] = Field( + default=None, description="The schema of the response format.", alias="schema" + ) + __properties: ClassVar[List[str]] = ["type", "schema"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionRequestResponseFormat from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionRequestResponseFormat from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"type": obj.get("type"), "schema": obj.get("schema")} + ) + return _obj diff --git a/src/together/generated/models/chat_completion_request_tool_choice.py b/src/together/generated/models/chat_completion_request_tool_choice.py new file mode 100644 index 00000000..d78632af --- /dev/null +++ b/src/together/generated/models/chat_completion_request_tool_choice.py @@ -0,0 +1,166 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.tool_choice import ToolChoice +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONREQUESTTOOLCHOICE_ONE_OF_SCHEMAS = ["ToolChoice", "str"] + + +class ChatCompletionRequestToolChoice(BaseModel): + """ + Controls which (if any) function is called by the model. By default uses `auto`, which lets the model pick between generating a message or calling a function. 
+ """ + + # data type: str + oneof_schema_1_validator: Optional[StrictStr] = None + # data type: ToolChoice + oneof_schema_2_validator: Optional[ToolChoice] = None + actual_instance: Optional[Union[ToolChoice, str]] = None + one_of_schemas: Set[str] = {"ToolChoice", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionRequestToolChoice.model_construct() + error_messages = [] + match = 0 + # validate data type: str + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: ToolChoice + if not isinstance(v, ToolChoice): + error_messages.append(f"Error! Input type `{type(v)}` is not `ToolChoice`") + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into str + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ToolChoice + try: + instance.actual_instance = ToolChoice.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionRequestToolChoice with oneOf schemas: ToolChoice, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], ToolChoice, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_response.py b/src/together/generated/models/chat_completion_response.py new file mode 100644 index 00000000..1a023acb --- /dev/null +++ b/src/together/generated/models/chat_completion_response.py @@ -0,0 +1,136 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionResponse(BaseModel): + """ + ChatCompletionResponse + """ # noqa: E501 + + id: StrictStr + choices: List[ChatCompletionChoicesDataInner] + usage: Optional[UsageData] = None + created: StrictInt + model: StrictStr + object: StrictStr + __properties: ClassVar[List[str]] = [ + "id", + "choices", + "usage", + "created", + "model", + "object", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["chat.completion"]): + raise ValueError("must be one of enum values ('chat.completion')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "choices": ( + [ + ChatCompletionChoicesDataInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "created": obj.get("created"), + "model": obj.get("model"), + "object": obj.get("object"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_stream.py b/src/together/generated/models/chat_completion_stream.py new file mode 100644 index 00000000..7f3c93de --- /dev/null +++ b/src/together/generated/models/chat_completion_stream.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.chat_completion_event import ChatCompletionEvent +from together.generated.models.stream_sentinel import StreamSentinel +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +CHATCOMPLETIONSTREAM_ONE_OF_SCHEMAS = ["ChatCompletionEvent", "StreamSentinel"] + + +class ChatCompletionStream(BaseModel): + """ + ChatCompletionStream + """ + + # data type: ChatCompletionEvent + oneof_schema_1_validator: Optional[ChatCompletionEvent] = None + # data type: StreamSentinel + oneof_schema_2_validator: Optional[StreamSentinel] = None + actual_instance: Optional[Union[ChatCompletionEvent, StreamSentinel]] = None + one_of_schemas: Set[str] = {"ChatCompletionEvent", "StreamSentinel"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ChatCompletionStream.model_construct() + error_messages = [] + match = 0 + # validate data type: ChatCompletionEvent + if not isinstance(v, ChatCompletionEvent): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `ChatCompletionEvent`" + ) + else: + match += 1 + # validate data type: StreamSentinel + if not isinstance(v, StreamSentinel): + error_messages.append( + f"Error! Input type `{type(v)}` is not `StreamSentinel`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into ChatCompletionEvent + try: + instance.actual_instance = ChatCompletionEvent.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into StreamSentinel + try: + instance.actual_instance = StreamSentinel.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ChatCompletionStream with oneOf schemas: ChatCompletionEvent, StreamSentinel. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], ChatCompletionEvent, StreamSentinel]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/chat_completion_system_message_param.py b/src/together/generated/models/chat_completion_system_message_param.py new file mode 100644 index 00000000..02c80038 --- /dev/null +++ b/src/together/generated/models/chat_completion_system_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionSystemMessageParam(BaseModel): + """ + ChatCompletionSystemMessageParam + """ # noqa: E501 + + content: StrictStr + role: StrictStr + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["content", "role", "name"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["system"]): + raise ValueError("must be one of enum values ('system')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionSystemMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionSystemMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_token.py b/src/together/generated/models/chat_completion_token.py new file mode 100644 index 00000000..60862fec --- /dev/null +++ b/src/together/generated/models/chat_completion_token.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToken(BaseModel): + """ + ChatCompletionToken + """ # noqa: E501 + + id: StrictInt + text: StrictStr + logprob: Union[StrictFloat, StrictInt] + special: StrictBool + __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToken from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToken from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "text": obj.get("text"), + "logprob": obj.get("logprob"), + "special": obj.get("special"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool.py b/src/together/generated/models/chat_completion_tool.py new file mode 100644 index 00000000..05d66c4d --- /dev/null +++ b/src/together/generated/models/chat_completion_tool.py @@ -0,0 +1,106 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionTool(BaseModel): + """ + ChatCompletionTool + """ # noqa: E501 + + type: StrictStr + function: ChatCompletionToolFunction + __properties: ClassVar[List[str]] = ["type", "function"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["function"]): + raise ValueError("must be one of enum values ('function')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionTool from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionTool from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "type": obj.get("type"), + "function": ( + ChatCompletionToolFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool_function.py b/src/together/generated/models/chat_completion_tool_function.py new file mode 100644 index 00000000..3ebaa385 --- /dev/null +++ b/src/together/generated/models/chat_completion_tool_function.py @@ -0,0 +1,91 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToolFunction(BaseModel): + """ + ChatCompletionToolFunction + """ # noqa: E501 + + description: Optional[StrictStr] = None + name: StrictStr + parameters: Optional[Dict[str, Any]] = None + __properties: ClassVar[List[str]] = ["description", "name", "parameters"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToolFunction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToolFunction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "description": obj.get("description"), + "name": obj.get("name"), + "parameters": obj.get("parameters"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_tool_message_param.py b/src/together/generated/models/chat_completion_tool_message_param.py new file mode 100644 index 00000000..115b718b --- /dev/null +++ b/src/together/generated/models/chat_completion_tool_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionToolMessageParam(BaseModel): + """ + ChatCompletionToolMessageParam + """ # noqa: E501 + + role: StrictStr + content: StrictStr + tool_call_id: StrictStr + __properties: ClassVar[List[str]] = ["role", "content", "tool_call_id"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["tool"]): + raise ValueError("must be one of enum values ('tool')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionToolMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionToolMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "role": obj.get("role"), + "content": obj.get("content"), + "tool_call_id": obj.get("tool_call_id"), + } + ) + return _obj diff --git a/src/together/generated/models/chat_completion_user_message_param.py b/src/together/generated/models/chat_completion_user_message_param.py new file mode 100644 index 00000000..e02a998c --- /dev/null +++ b/src/together/generated/models/chat_completion_user_message_param.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ChatCompletionUserMessageParam(BaseModel): + """ + ChatCompletionUserMessageParam + """ # noqa: E501 + + content: StrictStr + role: StrictStr + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["content", "role", "name"] + + @field_validator("role") + def role_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["user"]): + raise ValueError("must be one of enum values ('user')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ChatCompletionUserMessageParam from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ChatCompletionUserMessageParam from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "role": obj.get("role"), + "name": obj.get("name"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_choice.py b/src/together/generated/models/completion_choice.py new file mode 100644 index 00000000..0b1ac0a1 --- /dev/null +++ b/src/together/generated/models/completion_choice.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChoice(BaseModel): + """ + CompletionChoice + """ # noqa: E501 + + text: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["text"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"text": obj.get("text")}) + return _obj diff --git a/src/together/generated/models/completion_choices_data_inner.py b/src/together/generated/models/completion_choices_data_inner.py new file mode 100644 index 00000000..b8cb4e7e --- /dev/null +++ b/src/together/generated/models/completion_choices_data_inner.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.finish_reason import FinishReason +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChoicesDataInner(BaseModel): + """ + CompletionChoicesDataInner + """ # noqa: E501 + + text: Optional[StrictStr] = None + seed: Optional[StrictInt] = None + finish_reason: Optional[FinishReason] = None + logprobs: Optional[Dict[str, Any]] = None + __properties: ClassVar[List[str]] = ["text", "seed", "finish_reason", "logprobs"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChoicesDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of logprobs + if self.logprobs: + _dict["logprobs"] = self.logprobs.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChoicesDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "text": obj.get("text"), + "seed": obj.get("seed"), + "finish_reason": obj.get("finish_reason"), + "logprobs": ( + LogprobsPart.from_dict(obj["logprobs"]) + if obj.get("logprobs") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/completion_chunk.py b/src/together/generated/models/completion_chunk.py new file mode 100644 index 00000000..7a0b16bd --- /dev/null +++ b/src/together/generated/models/completion_chunk.py @@ -0,0 +1,139 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.completion_choice import CompletionChoice +from together.generated.models.completion_chunk_usage import CompletionChunkUsage +from together.generated.models.completion_token import CompletionToken +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChunk(BaseModel): + """ + CompletionChunk + """ # noqa: E501 + + id: StrictStr + token: CompletionToken + choices: List[CompletionChoice] + usage: CompletionChunkUsage + seed: Optional[StrictInt] = None + finish_reason: Any + __properties: ClassVar[List[str]] = [ + "id", + "token", + "choices", + "usage", + "seed", + "finish_reason", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChunk from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of token + if self.token: + _dict["token"] = self.token.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + # override the default output from pydantic by calling `to_dict()` of finish_reason + if self.finish_reason: + _dict["finish_reason"] = self.finish_reason.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChunk from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "token": ( + CompletionToken.from_dict(obj["token"]) + if obj.get("token") is not None + else None + ), + "choices": ( + [CompletionChoice.from_dict(_item) for _item in obj["choices"]] + if obj.get("choices") is not None + else None + ), + "usage": ( + CompletionChunkUsage.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "seed": obj.get("seed"), + "finish_reason": ( + FinishReason.from_dict(obj["finish_reason"]) + if obj.get("finish_reason") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/completion_chunk_usage.py b/src/together/generated/models/completion_chunk_usage.py new file mode 100644 index 00000000..df7a48f6 --- /dev/null +++ b/src/together/generated/models/completion_chunk_usage.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The 
Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionChunkUsage(BaseModel): + """ + CompletionChunkUsage + """ # noqa: E501 + + prompt_tokens: StrictInt + completion_tokens: StrictInt + total_tokens: StrictInt + __properties: ClassVar[List[str]] = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionChunkUsage from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionChunkUsage from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt_tokens": obj.get("prompt_tokens"), + "completion_tokens": obj.get("completion_tokens"), + "total_tokens": obj.get("total_tokens"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_event.py b/src/together/generated/models/completion_event.py new file mode 100644 index 00000000..5bd84c47 --- /dev/null +++ b/src/together/generated/models/completion_event.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.completion_chunk import CompletionChunk +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionEvent(BaseModel): + """ + CompletionEvent + """ # noqa: E501 + + data: CompletionChunk + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionEvent from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of data + if self.data: + _dict["data"] = self.data.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + CompletionChunk.from_dict(obj["data"]) + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/completion_request.py b/src/together/generated/models/completion_request.py new file mode 100644 index 00000000..065bef8b --- /dev/null +++ b/src/together/generated/models/completion_request.py @@ -0,0 +1,212 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing_extensions import Annotated +from together.generated.models.completion_request_model import CompletionRequestModel +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionRequest(BaseModel): + """ + CompletionRequest + """ # noqa: E501 + + prompt: StrictStr = Field( + description="A string providing context for the model to complete." + ) + model: CompletionRequestModel + max_tokens: Optional[StrictInt] = Field( + default=None, description="The maximum number of tokens to generate." + ) + stop: Optional[List[StrictStr]] = Field( + default=None, + description='A list of string sequences that will truncate (stop) inference text output. For example, "" will stop generation as soon as the model generates the given token.', + ) + temperature: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A decimal number from 0-1 that determines the degree of randomness in the response. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value closer to 1 introduces more randomness in the output.", + ) + top_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A percentage (also called the nucleus parameter) that's used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold below which all less likely tokens are filtered out. 
This technique helps maintain diversity and generate more fluent and natural-sounding text.", + ) + top_k: Optional[StrictInt] = Field( + default=None, + description="An integer that's used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.", + ) + repetition_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.", + ) + stream: Optional[StrictBool] = Field( + default=None, + description="If true, stream tokens as Server-Sent Events as the model generates them instead of waiting for the full model response. The stream terminates with `data: [DONE]`. If false, return a single JSON object containing the results.", + ) + logprobs: Optional[Annotated[int, Field(le=1, strict=True, ge=0)]] = Field( + default=None, + description="Determines the number of most likely tokens to return at each token position log probabilities to return.", + ) + echo: Optional[StrictBool] = Field( + default=None, + description="If true, the response will contain the prompt. 
Can be used with `logprobs` to return prompt logprobs.", + ) + n: Optional[Annotated[int, Field(le=128, strict=True, ge=1)]] = Field( + default=None, + description="The number of completions to generate for each prompt.", + ) + safety_model: Optional[CompletionRequestSafetyModel] = None + min_p: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between 0 and 1 that can be used as an alternative to top-p and top-k.", + ) + presence_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value increases the likelihood of a model talking about new topics.", + ) + frequency_penalty: Optional[Union[StrictFloat, StrictInt]] = Field( + default=None, + description="A number between -2.0 and 2.0 where a positive value decreases the likelihood of repeating tokens that have already been mentioned.", + ) + logit_bias: Optional[Dict[str, Union[StrictFloat, StrictInt]]] = Field( + default=None, + description="Adjusts the likelihood of specific tokens appearing in the generated output.", + ) + seed: Optional[StrictInt] = Field( + default=None, description="Seed value for reproducibility." 
+ ) + __properties: ClassVar[List[str]] = [ + "prompt", + "model", + "max_tokens", + "stop", + "temperature", + "top_p", + "top_k", + "repetition_penalty", + "stream", + "logprobs", + "echo", + "n", + "safety_model", + "min_p", + "presence_penalty", + "frequency_penalty", + "logit_bias", + "seed", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of safety_model + if self.safety_model: + _dict["safety_model"] = self.safety_model.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt": obj.get("prompt"), + "model": ( + CompletionRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "max_tokens": obj.get("max_tokens"), + "stop": obj.get("stop"), + "temperature": obj.get("temperature"), + "top_p": obj.get("top_p"), + "top_k": obj.get("top_k"), + "repetition_penalty": obj.get("repetition_penalty"), + "stream": obj.get("stream"), + "logprobs": obj.get("logprobs"), + "echo": obj.get("echo"), + "n": obj.get("n"), + "safety_model": ( + CompletionRequestSafetyModel.from_dict(obj["safety_model"]) + if obj.get("safety_model") is not None + else None + ), + "min_p": obj.get("min_p"), + "presence_penalty": obj.get("presence_penalty"), + "frequency_penalty": obj.get("frequency_penalty"), + "logit_bias": obj.get("logit_bias"), + "seed": obj.get("seed"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_request_model.py b/src/together/generated/models/completion_request_model.py new file mode 100644 index 00000000..38d25705 --- /dev/null +++ b/src/together/generated/models/completion_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
    The version of the OpenAPI document: 2.0.0
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
from inspect import getfullargspec
import json
import pprint
import re  # noqa: F401
from pydantic import (
    BaseModel,
    ConfigDict,
    Field,
    StrictStr,
    ValidationError,
    field_validator,
)
from typing import Optional
from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict
from typing_extensions import Literal, Self
from pydantic import Field

COMPLETIONREQUESTMODEL_ANY_OF_SCHEMAS = ["str"]


class CompletionRequestModel(BaseModel):
    """
    The name of the model to query. [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#chat-models)
    """

    # anyOf wrapper: each branch of the schema gets its own validator field.
    # NOTE(review): both branches are `str` here (the spec's anyOf collapses
    # to a single Python type), so the two validators are redundant but harmless.
    # data type: str
    anyof_schema_1_validator: Optional[StrictStr] = None
    # data type: str
    anyof_schema_2_validator: Optional[StrictStr] = None
    if TYPE_CHECKING:
        actual_instance: Optional[Union[str]] = None
    else:
        actual_instance: Any = None
    any_of_schemas: Set[str] = {"str"}

    model_config = {
        "validate_assignment": True,
        "protected_namespaces": (),
    }

    def __init__(self, *args, **kwargs) -> None:
        # Accept either a single positional value (becomes actual_instance)
        # or keyword arguments, but never both.
        if args:
            if len(args) > 1:
                raise ValueError(
                    "If a position argument is used, only 1 is allowed to set `actual_instance`"
                )
            if kwargs:
                raise ValueError(
                    "If a position argument is used, keyword arguments cannot be used."
                )
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator("actual_instance")
    def actual_instance_must_validate_anyof(cls, v):
        # Try each anyOf branch in order; the first branch that validates wins.
        # Assigning to the model_construct() probe triggers pydantic's
        # validate_assignment machinery.
        instance = CompletionRequestModel.model_construct()
        error_messages = []
        # validate data type: str
        try:
            instance.anyof_schema_1_validator = v
            return v
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # validate data type: str
        try:
            instance.anyof_schema_2_validator = v
            return v
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        if error_messages:
            # no match
            raise ValueError(
                "No match found when setting the actual_instance in CompletionRequestModel with anyOf schemas: str. Details: "
                + ", ".join(error_messages)
            )
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Dict[str, Any]) -> Self:
        # Round-trip through JSON so anyOf resolution lives in from_json only.
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        instance = cls.model_construct()
        error_messages = []
        # deserialize data into str
        try:
            # validation
            instance.anyof_schema_1_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.anyof_schema_1_validator
            return instance
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into str
        try:
            # validation
            instance.anyof_schema_2_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.anyof_schema_2_validator
            return instance
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if error_messages:
            # no match
            raise ValueError(
                "No match found when deserializing the JSON string into CompletionRequestModel with anyOf schemas: str. Details: "
                + ", ".join(error_messages)
            )
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        # Delegate to the wrapped value's serializer when it has one.
        if hasattr(self.actual_instance, "to_json") and callable(
            self.actual_instance.to_json
        ):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(self) -> Optional[Union[Dict[str, Any], str]]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        # Delegate to the wrapped value's converter when it has one.
        if hasattr(self.actual_instance, "to_dict") and callable(
            self.actual_instance.to_dict
        ):
            return self.actual_instance.to_dict()
        else:
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +COMPLETIONREQUESTSAFETYMODEL_ANY_OF_SCHEMAS = ["str"] + + +class CompletionRequestSafetyModel(BaseModel): + """ + The name of the moderation model used to validate tokens. Choose from the available moderation models found [here](https://docs.together.ai/docs/inference-models#moderation-models). + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = CompletionRequestSafetyModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in CompletionRequestSafetyModel with anyOf schemas: str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into CompletionRequestSafetyModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/completion_response.py b/src/together/generated/models/completion_response.py new file mode 100644 index 00000000..ac858b6d --- /dev/null +++ b/src/together/generated/models/completion_response.py @@ -0,0 +1,151 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) +from together.generated.models.prompt_part_inner import PromptPartInner +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionResponse(BaseModel): + """ + CompletionResponse + """ # noqa: E501 + + id: StrictStr + choices: List[CompletionChoicesDataInner] + prompt: Optional[List[PromptPartInner]] = None + usage: UsageData + created: StrictInt + model: StrictStr + object: StrictStr + __properties: ClassVar[List[str]] = [ + "id", + "choices", + "prompt", + "usage", + "created", + "model", + "object", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["text_completion"]): + raise ValueError("must be one of enum values ('text_completion')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in choices (list) + _items = [] + if self.choices: + for _item_choices in self.choices: + if _item_choices: + _items.append(_item_choices.to_dict()) + _dict["choices"] = _items + # override the default output from pydantic by calling `to_dict()` of each item in prompt (list) + _items = [] + if self.prompt: + for _item_prompt in self.prompt: + if _item_prompt: + _items.append(_item_prompt.to_dict()) + _dict["prompt"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "choices": ( + [ + CompletionChoicesDataInner.from_dict(_item) + for _item in obj["choices"] + ] + if obj.get("choices") is not None + else None + ), + "prompt": ( + [PromptPartInner.from_dict(_item) for _item in obj["prompt"]] + if obj.get("prompt") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + "created": obj.get("created"), + "model": obj.get("model"), + "object": obj.get("object"), + } + ) + return _obj diff --git a/src/together/generated/models/completion_stream.py b/src/together/generated/models/completion_stream.py new file mode 100644 
# coding: utf-8

"""
    Together APIs

    The Together REST API. Please see https://docs.together.ai for more details.

    The version of the OpenAPI document: 2.0.0
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
""" # noqa: E501


from __future__ import annotations
import json
import pprint
from pydantic import (
    BaseModel,
    ConfigDict,
    Field,
    StrictStr,
    ValidationError,
    field_validator,
)
from typing import Any, List, Optional
from together.generated.models.completion_event import CompletionEvent
from together.generated.models.stream_sentinel import StreamSentinel
from pydantic import StrictStr, Field
from typing import Union, List, Set, Optional, Dict
from typing_extensions import Literal, Self

# Names of the alternative schemas this oneOf wrapper can hold.
COMPLETIONSTREAM_ONE_OF_SCHEMAS = ["CompletionEvent", "StreamSentinel"]


class CompletionStream(BaseModel):
    """
    CompletionStream

    oneOf wrapper: a streamed completion chunk is either a CompletionEvent
    (data payload) or a StreamSentinel (end-of-stream marker). The matched
    value lives in `actual_instance`; the `oneof_schema_*_validator` fields
    exist only to drive per-schema validation.
    """

    # data type: CompletionEvent
    oneof_schema_1_validator: Optional[CompletionEvent] = None
    # data type: StreamSentinel
    oneof_schema_2_validator: Optional[StreamSentinel] = None
    actual_instance: Optional[Union[CompletionEvent, StreamSentinel]] = None
    one_of_schemas: Set[str] = {"CompletionEvent", "StreamSentinel"}

    model_config = ConfigDict(
        validate_assignment=True,
        protected_namespaces=(),
    )

    def __init__(self, *args, **kwargs) -> None:
        """Accept either one positional value (stored as `actual_instance`)
        or keyword arguments — never both."""
        if args:
            if len(args) > 1:
                raise ValueError(
                    "If a position argument is used, only 1 is allowed to set `actual_instance`"
                )
            if kwargs:
                raise ValueError(
                    "If a position argument is used, keyword arguments cannot be used."
                )
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator("actual_instance")
    def actual_instance_must_validate_oneof(cls, v):
        """Require `v` to be exactly one of the oneOf alternatives
        (checked by isinstance, so exactly one branch can match)."""
        instance = CompletionStream.model_construct()
        error_messages = []
        match = 0
        # validate data type: CompletionEvent
        if not isinstance(v, CompletionEvent):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `CompletionEvent`"
            )
        else:
            match += 1
        # validate data type: StreamSentinel
        if not isinstance(v, StreamSentinel):
            error_messages.append(
                f"Error! Input type `{type(v)}` is not `StreamSentinel`"
            )
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when setting `actual_instance` in CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: "
                + ", ".join(error_messages)
            )
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
        """Build from an already-parsed object by round-tripping through JSON."""
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        instance = cls.model_construct()
        error_messages = []
        match = 0

        # Try each alternative in order; the LAST successful parse wins if
        # more than one succeeds (then rejected below as ambiguous).
        # deserialize data into CompletionEvent
        try:
            instance.actual_instance = CompletionEvent.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into StreamSentinel
        try:
            instance.actual_instance = StreamSentinel.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when deserializing the JSON string into CompletionStream with oneOf schemas: CompletionEvent, StreamSentinel. Details: "
                + ", ".join(error_messages)
            )
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        if hasattr(self.actual_instance, "to_json") and callable(
            self.actual_instance.to_json
        ):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(
        self,
    ) -> Optional[Union[Dict[str, Any], CompletionEvent, StreamSentinel]]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        if hasattr(self.actual_instance, "to_dict") and callable(
            self.actual_instance.to_dict
        ):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictBool, + StrictFloat, + StrictInt, + StrictStr, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class CompletionToken(BaseModel): + """ + CompletionToken + """ # noqa: E501 + + id: StrictInt + text: StrictStr + logprob: Union[StrictFloat, StrictInt] + special: StrictBool + __properties: ClassVar[List[str]] = ["id", "text", "logprob", "special"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CompletionToken from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CompletionToken from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "text": obj.get("text"), + "logprob": obj.get("logprob"), + "special": obj.get("special"), + } + ) + return _obj diff --git a/src/together/generated/models/create_endpoint_request.py b/src/together/generated/models/create_endpoint_request.py new file mode 100644 index 00000000..70fc97b4 --- /dev/null +++ b/src/together/generated/models/create_endpoint_request.py @@ -0,0 +1,156 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class CreateEndpointRequest(BaseModel): + """ + CreateEndpointRequest + """ # noqa: E501 + + display_name: Optional[StrictStr] = Field( + default=None, description="A human-readable name for the endpoint" + ) + model: StrictStr = Field(description="The model to deploy on this endpoint") + hardware: StrictStr = Field( + description="The hardware configuration to use for this endpoint" + ) + autoscaling: Autoscaling = Field( + description="Configuration for automatic scaling of the endpoint" + ) + disable_prompt_cache: Optional[StrictBool] = Field( + default=False, + description="Whether to disable the prompt cache for this endpoint", + ) + disable_speculative_decoding: Optional[StrictBool] = Field( + default=False, + description="Whether to disable speculative decoding for this endpoint", + ) + state: Optional[StrictStr] = Field( + default="STARTED", description="The desired state of the endpoint" + ) + __properties: ClassVar[List[str]] = [ + "display_name", + "model", + "hardware", + "autoscaling", + "disable_prompt_cache", + "disable_speculative_decoding", + "state", + ] + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["STARTED", "STOPPED"]): + raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return 
pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of CreateEndpointRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of CreateEndpointRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "display_name": obj.get("display_name"), + "model": obj.get("model"), + "hardware": obj.get("hardware"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + "disable_prompt_cache": ( + obj.get("disable_prompt_cache") + if obj.get("disable_prompt_cache") is not None + else False + ), + "disable_speculative_decoding": ( + obj.get("disable_speculative_decoding") + if obj.get("disable_speculative_decoding") is not None + else False + ), + "state": ( + obj.get("state") if obj.get("state") is not None else 
"STARTED" + ), + } + ) + return _obj diff --git a/src/together/generated/models/dedicated_endpoint.py b/src/together/generated/models/dedicated_endpoint.py new file mode 100644 index 00000000..87ef87c5 --- /dev/null +++ b/src/together/generated/models/dedicated_endpoint.py @@ -0,0 +1,157 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class DedicatedEndpoint(BaseModel): + """ + Details about a dedicated endpoint deployment + """ # noqa: E501 + + object: StrictStr = Field(description="The type of object") + id: StrictStr = Field(description="Unique identifier for the endpoint") + name: StrictStr = Field(description="System name for the endpoint") + display_name: StrictStr = Field(description="Human-readable name for the endpoint") + model: StrictStr = Field(description="The model deployed on this endpoint") + hardware: StrictStr = Field( + description="The hardware configuration used for this endpoint" + ) + type: StrictStr = Field(description="The type of endpoint") + owner: StrictStr = Field(description="The owner of this endpoint") + state: StrictStr = Field(description="Current state of the endpoint") + autoscaling: Autoscaling = Field( + description="Configuration for automatic scaling of the endpoint" + ) + created_at: datetime = Field(description="Timestamp when the endpoint was created") + __properties: ClassVar[List[str]] = [ + "object", 
+ "id", + "name", + "display_name", + "model", + "hardware", + "type", + "owner", + "state", + "autoscaling", + "created_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["endpoint"]): + raise ValueError("must be one of enum values ('endpoint')") + return value + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["dedicated"]): + raise ValueError("must be one of enum values ('dedicated')") + return value + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value not in set( + ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] + ): + raise ValueError( + "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of DedicatedEndpoint from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of DedicatedEndpoint from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "name": obj.get("name"), + "display_name": obj.get("display_name"), + "model": obj.get("model"), + "hardware": obj.get("hardware"), + "type": obj.get("type"), + "owner": obj.get("owner"), + "state": obj.get("state"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + "created_at": obj.get("created_at"), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_request.py b/src/together/generated/models/embeddings_request.py new file mode 100644 index 00000000..bad5473c --- /dev/null +++ b/src/together/generated/models/embeddings_request.py @@ -0,0 +1,105 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsRequest(BaseModel): + """ + EmbeddingsRequest + """ # noqa: E501 + + model: EmbeddingsRequestModel + input: EmbeddingsRequestInput + __properties: ClassVar[List[str]] = ["model", "input"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of input + if self.input: + _dict["input"] = self.input.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "model": ( + EmbeddingsRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "input": ( + EmbeddingsRequestInput.from_dict(obj["input"]) + if obj.get("input") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_request_input.py b/src/together/generated/models/embeddings_request_input.py new file mode 100644 index 00000000..3a4ec919 --- /dev/null +++ b/src/together/generated/models/embeddings_request_input.py @@ -0,0 +1,171 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +EMBEDDINGSREQUESTINPUT_ONE_OF_SCHEMAS = ["List[str]", "str"] + + +class EmbeddingsRequestInput(BaseModel): + """ + EmbeddingsRequestInput + """ + + # data type: str + oneof_schema_1_validator: Optional[StrictStr] = Field( + default=None, description="A string providing the text for the model to embed." + ) + # data type: List[str] + oneof_schema_2_validator: Optional[List[StrictStr]] = None + actual_instance: Optional[Union[List[str], str]] = None + one_of_schemas: Set[str] = {"List[str]", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = EmbeddingsRequestInput.model_construct() + error_messages = [] + match = 0 + # validate data type: str + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: List[str] + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into str + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into List[str] + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 
match + raise ValueError( + "Multiple matches found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into EmbeddingsRequestInput with oneOf schemas: List[str], str. Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], List[str], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_request_model.py b/src/together/generated/models/embeddings_request_model.py new file mode 100644 index 00000000..5a40eb92 --- /dev/null +++ b/src/together/generated/models/embeddings_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +EMBEDDINGSREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class EmbeddingsRequestModel(BaseModel): + """ + The name of the embedding model to use.

[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = EmbeddingsRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in EmbeddingsRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into EmbeddingsRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/embeddings_response.py b/src/together/generated/models/embeddings_response.py new file mode 100644 index 00000000..cdf15928 --- /dev/null +++ b/src/together/generated/models/embeddings_response.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsResponse(BaseModel): + """ + EmbeddingsResponse + """ # noqa: E501 + + object: StrictStr + model: StrictStr + data: List[EmbeddingsResponseDataInner] + __properties: ClassVar[List[str]] = ["object", "model", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "model": obj.get("model"), + "data": ( + [ + EmbeddingsResponseDataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/embeddings_response_data_inner.py b/src/together/generated/models/embeddings_response_data_inner.py new file mode 100644 index 00000000..68e816ac --- /dev/null +++ b/src/together/generated/models/embeddings_response_data_inner.py @@ -0,0 +1,105 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class EmbeddingsResponseDataInner(BaseModel): + """ + EmbeddingsResponseDataInner + """ # noqa: E501 + + object: StrictStr + embedding: List[Union[StrictFloat, StrictInt]] + index: StrictInt + __properties: ClassVar[List[str]] = ["object", "embedding", "index"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["embedding"]): + raise ValueError("must be one of enum values ('embedding')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EmbeddingsResponseDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EmbeddingsResponseDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "embedding": obj.get("embedding"), + "index": obj.get("index"), + } + ) + return _obj diff --git a/src/together/generated/models/endpoint_pricing.py b/src/together/generated/models/endpoint_pricing.py new file mode 100644 index 00000000..847be535 --- /dev/null +++ b/src/together/generated/models/endpoint_pricing.py @@ -0,0 +1,85 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class EndpointPricing(BaseModel): + """ + Pricing details for using an endpoint + """ # noqa: E501 + + cents_per_minute: Union[StrictFloat, StrictInt] = Field( + description="Cost per minute of endpoint uptime in cents" + ) + __properties: ClassVar[List[str]] = ["cents_per_minute"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of EndpointPricing from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of EndpointPricing from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"cents_per_minute": obj.get("cents_per_minute")}) + return _obj diff --git a/src/together/generated/models/error_data.py b/src/together/generated/models/error_data.py new file mode 100644 index 00000000..0dd9d3ec --- /dev/null +++ b/src/together/generated/models/error_data.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.error_data_error import ErrorDataError +from typing import Optional, Set +from typing_extensions import Self + + +class ErrorData(BaseModel): + """ + ErrorData + """ # noqa: E501 + + error: ErrorDataError + __properties: ClassVar[List[str]] = ["error"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def 
from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ErrorData from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of error + if self.error: + _dict["error"] = self.error.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ErrorData from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "error": ( + ErrorDataError.from_dict(obj["error"]) + if obj.get("error") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/error_data_error.py b/src/together/generated/models/error_data_error.py new file mode 100644 index 00000000..f43533ea --- /dev/null +++ b/src/together/generated/models/error_data_error.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ErrorDataError(BaseModel): + """ + ErrorDataError + """ # noqa: E501 + + message: StrictStr + type: StrictStr + param: Optional[StrictStr] = None + code: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["message", "type", "param", "code"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ErrorDataError from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ErrorDataError from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "message": obj.get("message"), + "type": obj.get("type"), + "param": obj.get("param"), + "code": obj.get("code"), + } + ) + return _obj diff --git a/src/together/generated/models/file_delete_response.py b/src/together/generated/models/file_delete_response.py new file mode 100644 index 00000000..07ad867e --- /dev/null +++ b/src/together/generated/models/file_delete_response.py @@ -0,0 +1,84 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FileDeleteResponse(BaseModel): + """ + FileDeleteResponse + """ # noqa: E501 + + id: Optional[StrictStr] = None + deleted: Optional[StrictBool] = None + __properties: ClassVar[List[str]] = ["id", "deleted"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileDeleteResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileDeleteResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"id": obj.get("id"), "deleted": obj.get("deleted")}) + return _obj diff --git a/src/together/generated/models/file_list.py b/src/together/generated/models/file_list.py new file mode 100644 index 00000000..ca596491 --- /dev/null +++ b/src/together/generated/models/file_list.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.file_response import FileResponse +from typing import Optional, Set +from typing_extensions import Self + + +class FileList(BaseModel): + """ + FileList + """ # noqa: E501 + + data: List[FileResponse] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def 
from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileList from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileList from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FileResponse.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/file_object.py b/src/together/generated/models/file_object.py new file mode 100644 index 00000000..55a8b422 --- /dev/null +++ b/src/together/generated/models/file_object.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FileObject(BaseModel): + """ + FileObject + """ # noqa: E501 + + object: Optional[StrictStr] = None + id: Optional[StrictStr] = None + filename: Optional[StrictStr] = None + size: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = ["object", "id", "filename", "size"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileObject from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileObject from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "filename": obj.get("filename"), + "size": obj.get("size"), + } + ) + return _obj diff --git a/src/together/generated/models/file_response.py b/src/together/generated/models/file_response.py new file mode 100644 index 00000000..7fe105e4 --- /dev/null +++ b/src/together/generated/models/file_response.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class FileResponse(BaseModel): + """ + FileResponse + """ # noqa: E501 + + id: StrictStr + object: StrictStr + created_at: StrictInt + filename: StrictStr + bytes: StrictInt + purpose: StrictStr + processed: StrictBool = Field(alias="Processed") + file_type: StrictStr = Field(alias="FileType") + line_count: StrictInt = Field(alias="LineCount") + __properties: ClassVar[List[str]] = [ + "id", + "object", + "created_at", + "filename", + "bytes", + "purpose", + "Processed", + "FileType", + "LineCount", + ] + + @field_validator("purpose") + def purpose_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["fine-tune"]): + raise ValueError("must be one of enum values ('fine-tune')") + return value + + @field_validator("file_type") + def file_type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["jsonl", "parquet"]): + raise ValueError("must be one of enum values ('jsonl', 'parquet')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FileResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) 
-> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FileResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "object": obj.get("object"), + "created_at": obj.get("created_at"), + "filename": obj.get("filename"), + "bytes": obj.get("bytes"), + "purpose": obj.get("purpose"), + "Processed": obj.get("Processed"), + "FileType": obj.get("FileType"), + "LineCount": obj.get("LineCount"), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tune_event.py b/src/together/generated/models/fine_tune_event.py new file mode 100644 index 00000000..638272ab --- /dev/null +++ b/src/together/generated/models/fine_tune_event.py @@ -0,0 +1,137 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.finetune_event_levels import FinetuneEventLevels +from together.generated.models.finetune_event_type import FinetuneEventType +from typing import Optional, Set +from typing_extensions import Self + + +class FineTuneEvent(BaseModel): + """ + FineTuneEvent + """ # noqa: E501 + + object: StrictStr + created_at: StrictStr + level: Optional[FinetuneEventLevels] = None + message: StrictStr + type: FinetuneEventType + param_count: StrictInt + token_count: StrictInt + total_steps: StrictInt + wandb_url: StrictStr + step: StrictInt + checkpoint_path: StrictStr + model_path: StrictStr + training_offset: StrictInt + hash: StrictStr + __properties: ClassVar[List[str]] = [ + "object", + "created_at", + "level", + "message", + "type", + "param_count", + "token_count", + "total_steps", + "wandb_url", + "step", + "checkpoint_path", + "model_path", + "training_offset", + "hash", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["fine-tune-event"]): + raise ValueError("must be one of enum values ('fine-tune-event')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FineTuneEvent from a JSON 
string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FineTuneEvent from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "created_at": obj.get("created_at"), + "level": obj.get("level"), + "message": obj.get("message"), + "type": obj.get("type"), + "param_count": obj.get("param_count"), + "token_count": obj.get("token_count"), + "total_steps": obj.get("total_steps"), + "wandb_url": obj.get("wandb_url"), + "step": obj.get("step"), + "checkpoint_path": obj.get("checkpoint_path"), + "model_path": obj.get("model_path"), + "training_offset": obj.get("training_offset"), + "hash": obj.get("hash"), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tunes_post_request.py b/src/together/generated/models/fine_tunes_post_request.py new file mode 100644 index 00000000..215ca080 --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request.py @@ -0,0 +1,233 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from typing import Optional, Set +from typing_extensions import Self + + +class FineTunesPostRequest(BaseModel): + """ + FineTunesPostRequest + """ # noqa: E501 + + training_file: StrictStr = Field( + description="File-ID of a training file uploaded to the Together API" + ) + validation_file: Optional[StrictStr] = Field( + default=None, + description="File-ID of a validation file uploaded to the Together API", + ) + model: StrictStr = Field( + description="Name of the base model to run fine-tune job on" + ) + n_epochs: Optional[StrictInt] = Field( + default=1, description="Number of epochs for fine-tuning" + ) + n_checkpoints: Optional[StrictInt] = Field( + default=1, description="Number of checkpoints to save during fine-tuning" + ) + n_evals: Optional[StrictInt] = Field( + default=0, + description="Number of evaluations to be run on a given validation set during training", + ) + batch_size: Optional[StrictInt] = Field( + default=32, description="Batch size for fine-tuning" + ) + learning_rate: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.000010, description="Learning rate multiplier to use for training" + ) + lr_scheduler: Optional[Dict[str, Any]] = None + warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, + description="The percent of steps at the start of training to linearly increase the learning rate.", + ) + max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = Field( + default=1.0, + description="Max gradient norm to be 
used for gradient clipping. Set to 0 to disable.", + ) + weight_decay: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, description="Weight decay" + ) + suffix: Optional[StrictStr] = Field( + default=None, + description="Suffix that will be added to your fine-tuned model name", + ) + wandb_api_key: Optional[StrictStr] = Field( + default=None, description="API key for Weights & Biases integration" + ) + wandb_base_url: Optional[StrictStr] = Field( + default=None, + description="The base URL of a dedicated Weights & Biases instance.", + ) + wandb_project_name: Optional[StrictStr] = Field( + default=None, + description="The Weights & Biases project for your run. If not specified, will use `together` as the project name.", + ) + wandb_name: Optional[StrictStr] = Field( + default=None, description="The Weights & Biases name for your run." + ) + train_on_inputs: Optional[FineTunesPostRequestTrainOnInputs] = False + training_type: Optional[FineTunesPostRequestTrainingType] = None + __properties: ClassVar[List[str]] = [ + "training_file", + "validation_file", + "model", + "n_epochs", + "n_checkpoints", + "n_evals", + "batch_size", + "learning_rate", + "lr_scheduler", + "warmup_ratio", + "max_grad_norm", + "weight_decay", + "suffix", + "wandb_api_key", + "wandb_base_url", + "wandb_project_name", + "wandb_name", + "train_on_inputs", + "training_type", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FineTunesPostRequest from a JSON 
string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of lr_scheduler + if self.lr_scheduler: + _dict["lr_scheduler"] = self.lr_scheduler.to_dict() + # override the default output from pydantic by calling `to_dict()` of train_on_inputs + if self.train_on_inputs: + _dict["train_on_inputs"] = self.train_on_inputs.to_dict() + # override the default output from pydantic by calling `to_dict()` of training_type + if self.training_type: + _dict["training_type"] = self.training_type.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FineTunesPostRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "training_file": obj.get("training_file"), + "validation_file": obj.get("validation_file"), + "model": obj.get("model"), + "n_epochs": ( + obj.get("n_epochs") if obj.get("n_epochs") is not None else 1 + ), + "n_checkpoints": ( + obj.get("n_checkpoints") + if obj.get("n_checkpoints") is not None + else 1 + ), + "n_evals": obj.get("n_evals") if obj.get("n_evals") is not None else 0, + "batch_size": ( + obj.get("batch_size") if obj.get("batch_size") is not None else 32 + ), + "learning_rate": ( + obj.get("learning_rate") + if obj.get("learning_rate") is not None + else 0.000010 + ), + "lr_scheduler": ( + 
LRScheduler.from_dict(obj["lr_scheduler"]) + if obj.get("lr_scheduler") is not None + else None + ), + "warmup_ratio": ( + obj.get("warmup_ratio") + if obj.get("warmup_ratio") is not None + else 0.0 + ), + "max_grad_norm": ( + obj.get("max_grad_norm") + if obj.get("max_grad_norm") is not None + else 1.0 + ), + "weight_decay": ( + obj.get("weight_decay") + if obj.get("weight_decay") is not None + else 0.0 + ), + "suffix": obj.get("suffix"), + "wandb_api_key": obj.get("wandb_api_key"), + "wandb_base_url": obj.get("wandb_base_url"), + "wandb_project_name": obj.get("wandb_project_name"), + "wandb_name": obj.get("wandb_name"), + "train_on_inputs": ( + FineTunesPostRequestTrainOnInputs.from_dict(obj["train_on_inputs"]) + if obj.get("train_on_inputs") is not None + else None + ), + "training_type": ( + FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) + if obj.get("training_type") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py b/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py new file mode 100644 index 00000000..4c5e7c3c --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request_train_on_inputs.py @@ -0,0 +1,170 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNESPOSTREQUESTTRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] + + +class FineTunesPostRequestTrainOnInputs(BaseModel): + """ + Whether to mask the user messages in conversational data or prompts in instruction data. + """ + + # data type: bool + oneof_schema_1_validator: Optional[StrictBool] = None + # data type: str + oneof_schema_2_validator: Optional[StrictStr] = None + actual_instance: Optional[Union[bool, str]] = None + one_of_schemas: Set[str] = {"bool", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FineTunesPostRequestTrainOnInputs.model_construct() + error_messages = [] + match = 0 + # validate data type: bool + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into bool + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FineTunesPostRequestTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/fine_tunes_post_request_training_type.py b/src/together/generated/models/fine_tunes_post_request_training_type.py new file mode 100644 index 00000000..8d4b6906 --- /dev/null +++ b/src/together/generated/models/fine_tunes_post_request_training_type.py @@ -0,0 +1,172 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.full_training_type import FullTrainingType +from together.generated.models.lo_ra_training_type import LoRATrainingType +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNESPOSTREQUESTTRAININGTYPE_ONE_OF_SCHEMAS = [ + "FullTrainingType", + "LoRATrainingType", +] + + +class FineTunesPostRequestTrainingType(BaseModel): + """ + FineTunesPostRequestTrainingType + """ + + # data type: FullTrainingType + oneof_schema_1_validator: Optional[FullTrainingType] = None + # data type: LoRATrainingType + oneof_schema_2_validator: Optional[LoRATrainingType] = None + actual_instance: Optional[Union[FullTrainingType, LoRATrainingType]] = None + one_of_schemas: Set[str] = {"FullTrainingType", "LoRATrainingType"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FineTunesPostRequestTrainingType.model_construct() + error_messages = [] + match = 0 + # validate data type: FullTrainingType + if not isinstance(v, FullTrainingType): + error_messages.append( + f"Error! 
Input type `{type(v)}` is not `FullTrainingType`" + ) + else: + match += 1 + # validate data type: LoRATrainingType + if not isinstance(v, LoRATrainingType): + error_messages.append( + f"Error! Input type `{type(v)}` is not `LoRATrainingType`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into FullTrainingType + try: + instance.actual_instance = FullTrainingType.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into LoRATrainingType + try: + instance.actual_instance = LoRATrainingType.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FineTunesPostRequestTrainingType with oneOf schemas: FullTrainingType, LoRATrainingType. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[Union[Dict[str, Any], FullTrainingType, LoRATrainingType]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finetune_download_result.py b/src/together/generated/models/finetune_download_result.py new file mode 100644 index 00000000..ddebbc70 --- /dev/null +++ b/src/together/generated/models/finetune_download_result.py @@ -0,0 +1,116 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneDownloadResult(BaseModel): + """ + FinetuneDownloadResult + """ # noqa: E501 + + object: Optional[StrictStr] = None + id: Optional[StrictStr] = None + checkpoint_step: Optional[StrictInt] = None + filename: Optional[StrictStr] = None + size: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "object", + "id", + "checkpoint_step", + "filename", + "size", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["local"]): + raise ValueError("must be one of enum values ('local')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneDownloadResult from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # set to None if object (nullable) is None + # and model_fields_set contains the field + if self.object is None and "object" in self.model_fields_set: + _dict["object"] = None + + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneDownloadResult from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "checkpoint_step": obj.get("checkpoint_step"), + "filename": obj.get("filename"), + "size": obj.get("size"), + } + ) + return _obj diff --git a/src/together/generated/models/finetune_event_levels.py b/src/together/generated/models/finetune_event_levels.py new file mode 100644 index 00000000..ee263088 --- /dev/null +++ b/src/together/generated/models/finetune_event_levels.py @@ -0,0 +1,39 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneEventLevels(str, Enum): + """ + FinetuneEventLevels + """ + + """ + allowed enum values + """ + INFO = "info" + WARNING = "warning" + ERROR = "error" + LEGACY_INFO = "legacy_info" + LEGACY_IWARNING = "legacy_iwarning" + LEGACY_IERROR = "legacy_ierror" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneEventLevels from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_event_type.py b/src/together/generated/models/finetune_event_type.py new file mode 100644 index 00000000..8f65293b --- /dev/null +++ b/src/together/generated/models/finetune_event_type.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneEventType(str, Enum): + """ + FinetuneEventType + """ + + """ + allowed enum values + """ + JOB_PENDING = "job_pending" + JOB_START = "job_start" + JOB_STOPPED = "job_stopped" + MODEL_DOWNLOADING = "model_downloading" + MODEL_DOWNLOAD_COMPLETE = "model_download_complete" + TRAINING_DATA_DOWNLOADING = "training_data_downloading" + TRAINING_DATA_DOWNLOAD_COMPLETE = "training_data_download_complete" + VALIDATION_DATA_DOWNLOADING = "validation_data_downloading" + VALIDATION_DATA_DOWNLOAD_COMPLETE = "validation_data_download_complete" + WANDB_INIT = "wandb_init" + TRAINING_START = "training_start" + CHECKPOINT_SAVE = "checkpoint_save" + BILLING_LIMIT = "billing_limit" + EPOCH_COMPLETE = "epoch_complete" + TRAINING_COMPLETE = "training_complete" + MODEL_COMPRESSING = "model_compressing" + MODEL_COMPRESSION_COMPLETE = "model_compression_complete" + MODEL_UPLOADING = "model_uploading" + MODEL_UPLOAD_COMPLETE = "model_upload_complete" + JOB_COMPLETE = "job_complete" + JOB_ERROR = "job_error" + CANCEL_REQUESTED = "cancel_requested" + JOB_RESTARTED = "job_restarted" + REFUND = "refund" + WARNING = "warning" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneEventType from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_job_status.py b/src/together/generated/models/finetune_job_status.py new file mode 100644 index 00000000..97b5dd1c --- /dev/null +++ b/src/together/generated/models/finetune_job_status.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinetuneJobStatus(str, Enum): + """ + FinetuneJobStatus + """ + + """ + allowed enum values + """ + PENDING = "pending" + QUEUED = "queued" + RUNNING = "running" + COMPRESSING = "compressing" + UPLOADING = "uploading" + CANCEL_REQUESTED = "cancel_requested" + CANCELLED = "cancelled" + ERROR = "error" + COMPLETED = "completed" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinetuneJobStatus from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/finetune_list.py b/src/together/generated/models/finetune_list.py new file mode 100644 index 00000000..2a20ba4c --- /dev/null +++ b/src/together/generated/models/finetune_list.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.finetune_response import FinetuneResponse +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneList(BaseModel): + """ + FinetuneList + """ # noqa: E501 + + data: List[FinetuneResponse] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneList from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneList from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FinetuneResponse.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/finetune_list_events.py b/src/together/generated/models/finetune_list_events.py new file mode 100644 index 00000000..c4266c1c --- /dev/null +++ b/src/together/generated/models/finetune_list_events.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict +from typing import Any, ClassVar, Dict, List +from together.generated.models.fine_tune_event import FineTuneEvent +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneListEvents(BaseModel): + """ + FinetuneListEvents + """ # noqa: E501 + + data: List[FineTuneEvent] + __properties: ClassVar[List[str]] = ["data"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneListEvents from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneListEvents from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "data": ( + [FineTuneEvent.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ) + } + ) + return _obj diff --git a/src/together/generated/models/finetune_response.py b/src/together/generated/models/finetune_response.py new file mode 100644 index 00000000..a1055827 --- /dev/null +++ b/src/together/generated/models/finetune_response.py @@ -0,0 +1,222 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.fine_tune_event import FineTuneEvent +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) +from together.generated.models.finetune_job_status import FinetuneJobStatus +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) +from typing import Optional, Set +from typing_extensions import Self + + +class FinetuneResponse(BaseModel): + """ + FinetuneResponse + """ # noqa: E501 + + id: StrictStr + training_file: Optional[StrictStr] = None + validation_file: Optional[StrictStr] = None + model: Optional[StrictStr] = None + model_output_name: Optional[StrictStr] = None + model_output_path: Optional[StrictStr] = None + trainingfile_numlines: Optional[StrictInt] = None + trainingfile_size: Optional[StrictInt] = None + created_at: Optional[StrictStr] = None + updated_at: Optional[StrictStr] = None + n_epochs: Optional[StrictInt] = None + n_checkpoints: Optional[StrictInt] = None + n_evals: Optional[StrictInt] = None + batch_size: Optional[StrictInt] = None + learning_rate: Optional[Union[StrictFloat, StrictInt]] = None + lr_scheduler: Optional[Dict[str, Any]] = None + warmup_ratio: Optional[Union[StrictFloat, StrictInt]] = None + max_grad_norm: Optional[Union[StrictFloat, StrictInt]] = None + weight_decay: Optional[Union[StrictFloat, StrictInt]] = None + eval_steps: Optional[StrictInt] = None + train_on_inputs: Optional[FinetuneResponseTrainOnInputs] = None + training_type: Optional[FineTunesPostRequestTrainingType] = None + status: FinetuneJobStatus + job_id: Optional[StrictStr] = None + events: Optional[List[FineTuneEvent]] = None + token_count: Optional[StrictInt] = None + 
param_count: Optional[StrictInt] = None + total_price: Optional[StrictInt] = None + epochs_completed: Optional[StrictInt] = None + queue_depth: Optional[StrictInt] = None + wandb_project_name: Optional[StrictStr] = None + wandb_url: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "id", + "training_file", + "validation_file", + "model", + "model_output_name", + "model_output_path", + "trainingfile_numlines", + "trainingfile_size", + "created_at", + "updated_at", + "n_epochs", + "n_checkpoints", + "n_evals", + "batch_size", + "learning_rate", + "lr_scheduler", + "warmup_ratio", + "max_grad_norm", + "weight_decay", + "eval_steps", + "train_on_inputs", + "training_type", + "status", + "job_id", + "events", + "token_count", + "param_count", + "total_price", + "epochs_completed", + "queue_depth", + "wandb_project_name", + "wandb_url", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FinetuneResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of lr_scheduler + if self.lr_scheduler: + _dict["lr_scheduler"] = self.lr_scheduler.to_dict() + # override the default output from pydantic by calling `to_dict()` of train_on_inputs + if self.train_on_inputs: + _dict["train_on_inputs"] = self.train_on_inputs.to_dict() + # override the default output from pydantic by calling `to_dict()` of training_type + if self.training_type: + _dict["training_type"] = self.training_type.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in events (list) + _items = [] + if self.events: + for _item_events in self.events: + if _item_events: + _items.append(_item_events.to_dict()) + _dict["events"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FinetuneResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "id": obj.get("id"), + "training_file": obj.get("training_file"), + "validation_file": obj.get("validation_file"), + "model": obj.get("model"), + "model_output_name": obj.get("model_output_name"), + "model_output_path": obj.get("model_output_path"), + "trainingfile_numlines": obj.get("trainingfile_numlines"), + "trainingfile_size": obj.get("trainingfile_size"), + "created_at": obj.get("created_at"), + "updated_at": obj.get("updated_at"), + "n_epochs": obj.get("n_epochs"), + "n_checkpoints": obj.get("n_checkpoints"), + "n_evals": obj.get("n_evals"), + "batch_size": obj.get("batch_size"), + "learning_rate": obj.get("learning_rate"), + "lr_scheduler": ( + LRScheduler.from_dict(obj["lr_scheduler"]) + if obj.get("lr_scheduler") is not None + else None + ), + "warmup_ratio": 
obj.get("warmup_ratio"), + "max_grad_norm": obj.get("max_grad_norm"), + "weight_decay": obj.get("weight_decay"), + "eval_steps": obj.get("eval_steps"), + "train_on_inputs": ( + FinetuneResponseTrainOnInputs.from_dict(obj["train_on_inputs"]) + if obj.get("train_on_inputs") is not None + else None + ), + "training_type": ( + FineTunesPostRequestTrainingType.from_dict(obj["training_type"]) + if obj.get("training_type") is not None + else None + ), + "status": obj.get("status"), + "job_id": obj.get("job_id"), + "events": ( + [FineTuneEvent.from_dict(_item) for _item in obj["events"]] + if obj.get("events") is not None + else None + ), + "token_count": obj.get("token_count"), + "param_count": obj.get("param_count"), + "total_price": obj.get("total_price"), + "epochs_completed": obj.get("epochs_completed"), + "queue_depth": obj.get("queue_depth"), + "wandb_project_name": obj.get("wandb_project_name"), + "wandb_url": obj.get("wandb_url"), + } + ) + return _obj diff --git a/src/together/generated/models/finetune_response_train_on_inputs.py b/src/together/generated/models/finetune_response_train_on_inputs.py new file mode 100644 index 00000000..44ff7e8a --- /dev/null +++ b/src/together/generated/models/finetune_response_train_on_inputs.py @@ -0,0 +1,170 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictBool, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +FINETUNERESPONSETRAINONINPUTS_ONE_OF_SCHEMAS = ["bool", "str"] + + +class FinetuneResponseTrainOnInputs(BaseModel): + """ + FinetuneResponseTrainOnInputs + """ + + # data type: bool + oneof_schema_1_validator: Optional[StrictBool] = None + # data type: str + oneof_schema_2_validator: Optional[StrictStr] = None + actual_instance: Optional[Union[bool, str]] = None + one_of_schemas: Set[str] = {"bool", "str"} + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = FinetuneResponseTrainOnInputs.model_construct() + error_messages = [] + match = 0 + # validate data type: bool + try: + instance.oneof_schema_1_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.oneof_schema_2_validator = v + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into bool + try: + # validation + instance.oneof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_1_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.oneof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.oneof_schema_2_validator + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into FinetuneResponseTrainOnInputs with oneOf schemas: bool, str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], bool, str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/finish_reason.py b/src/together/generated/models/finish_reason.py new file mode 100644 index 00000000..4d88eced --- /dev/null +++ b/src/together/generated/models/finish_reason.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +from enum import Enum +from typing_extensions import Self + + +class FinishReason(str, Enum): + """ + FinishReason + """ + + """ + allowed enum values + """ + STOP = "stop" + EOS = "eos" + LENGTH = "length" + TOOL_CALLS = "tool_calls" + FUNCTION_CALL = "function_call" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of FinishReason from a JSON string""" + return cls(json.loads(json_str)) diff --git a/src/together/generated/models/full_training_type.py b/src/together/generated/models/full_training_type.py new file mode 100644 index 00000000..6999096a --- /dev/null +++ b/src/together/generated/models/full_training_type.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class FullTrainingType(BaseModel): + """ + FullTrainingType + """ # noqa: E501 + + type: StrictStr + __properties: ClassVar[List[str]] = ["type"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["Full"]): + raise ValueError("must be one of enum values ('Full')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of FullTrainingType from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of FullTrainingType from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"type": obj.get("type")}) + return _obj diff --git a/src/together/generated/models/hardware_availability.py b/src/together/generated/models/hardware_availability.py new file mode 100644 index 00000000..53a166cf --- /dev/null +++ b/src/together/generated/models/hardware_availability.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class HardwareAvailability(BaseModel): + """ + Indicates the current availability status of a hardware configuration + """ # noqa: E501 + + status: StrictStr = Field( + description="The availability status of the hardware configuration" + ) + __properties: ClassVar[List[str]] = ["status"] + + @field_validator("status") + def status_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["available", "unavailable", "insufficient"]): + raise ValueError( + "must be one of enum values ('available', 'unavailable', 'insufficient')" + ) + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of HardwareAvailability from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of HardwareAvailability from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"status": obj.get("status")}) + return _obj diff --git a/src/together/generated/models/hardware_spec.py b/src/together/generated/models/hardware_spec.py new file mode 100644 index 00000000..10d0058a --- /dev/null +++ b/src/together/generated/models/hardware_spec.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class HardwareSpec(BaseModel): + """ + Detailed specifications of a hardware configuration + """ # noqa: E501 + + gpu_type: StrictStr = Field(description="The type/model of GPU") + gpu_link: StrictStr = Field(description="The GPU interconnect technology") + gpu_memory: Union[StrictFloat, StrictInt] = Field( + description="Amount of GPU memory in GB" + ) + gpu_count: StrictInt = Field(description="Number of GPUs in this configuration") + __properties: ClassVar[List[str]] = [ + "gpu_type", + "gpu_link", + "gpu_memory", + "gpu_count", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> 
str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of HardwareSpec from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of HardwareSpec from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "gpu_type": obj.get("gpu_type"), + "gpu_link": obj.get("gpu_link"), + "gpu_memory": obj.get("gpu_memory"), + "gpu_count": obj.get("gpu_count"), + } + ) + return _obj diff --git a/src/together/generated/models/hardware_with_status.py b/src/together/generated/models/hardware_with_status.py new file mode 100644 index 00000000..46680485 --- /dev/null +++ b/src/together/generated/models/hardware_with_status.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
import pprint
import re  # noqa: F401
import json

from datetime import datetime
from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
from typing import Any, ClassVar, Dict, List, Optional
from together.generated.models.endpoint_pricing import EndpointPricing
from together.generated.models.hardware_availability import HardwareAvailability
from together.generated.models.hardware_spec import HardwareSpec
from typing import Optional, Set
from typing_extensions import Self


class HardwareWithStatus(BaseModel):
    """
    Hardware configuration details including current availability status
    """  # noqa: E501

    object: StrictStr
    name: StrictStr = Field(
        description="Unique identifier for the hardware configuration"
    )
    pricing: EndpointPricing
    specs: HardwareSpec
    availability: Optional[HardwareAvailability] = None
    updated_at: datetime = Field(
        description="Timestamp of when the hardware status was last updated"
    )
    __properties: ClassVar[List[str]] = [
        "object",
        "name",
        "pricing",
        "specs",
        "availability",
        "updated_at",
    ]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Validates the enum"""
        if value not in {"hardware"}:
            raise ValueError("must be one of enum values ('hardware')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Render the model (by alias) via pprint."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Serialize the model (by alias) to a JSON string."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build a HardwareWithStatus from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict view; nested models are serialized via their own
        to_dict(), and fields whose value is None are omitted."""
        serialized = self.model_dump(
            by_alias=True,
            exclude=set(),
            exclude_none=True,
        )
        # Replace pydantic's default rendering of nested models with each
        # model's own to_dict() output.
        for field_name in ("pricing", "specs", "availability"):
            nested = getattr(self, field_name)
            if nested:
                serialized[field_name] = nested.to_dict()
        return serialized

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build a HardwareWithStatus from a plain dict (None passes through)."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        pricing = obj.get("pricing")
        specs = obj.get("specs")
        availability = obj.get("availability")
        return cls.model_validate(
            {
                "object": obj.get("object"),
                "name": obj.get("name"),
                "pricing": (
                    EndpointPricing.from_dict(pricing) if pricing is not None else None
                ),
                "specs": (
                    HardwareSpec.from_dict(specs) if specs is not None else None
                ),
                "availability": (
                    HardwareAvailability.from_dict(availability)
                    if availability is not None
                    else None
                ),
                "updated_at": obj.get("updated_at"),
            }
        )
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, StrictStr, field_validator
from typing import Any, ClassVar, Dict, List, Optional
from together.generated.models.image_response_data_inner import ImageResponseDataInner
from typing import Optional, Set
from typing_extensions import Self


class ImageResponse(BaseModel):
    """
    Response envelope for an image-generation call: request id, the model
    used, and the list of generated images.
    """  # noqa: E501

    id: StrictStr
    model: StrictStr
    object: StrictStr
    data: List[Optional[ImageResponseDataInner]]
    __properties: ClassVar[List[str]] = ["id", "model", "object", "data"]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Validates the enum"""
        if value not in {"list"}:
            raise ValueError("must be one of enum values ('list')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Render the model (by alias) via pprint."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Serialize the model (by alias) to a JSON string."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Build an ImageResponse from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict view; each item of `data` is serialized via its
        own to_dict(), and fields whose value is None are omitted."""
        serialized = self.model_dump(
            by_alias=True,
            exclude=set(),
            exclude_none=True,
        )
        # `data` is always emitted as a list, with None entries filtered out.
        serialized["data"] = [
            item.to_dict() for item in (self.data or []) if item
        ]
        return serialized

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Build an ImageResponse from a plain dict (None passes through)."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        raw_items = obj.get("data")
        return cls.model_validate(
            {
                "id": obj.get("id"),
                "model": obj.get("model"),
                "object": obj.get("object"),
                "data": (
                    [ImageResponseDataInner.from_dict(item) for item in raw_items]
                    if raw_items is not None
                    else None
                ),
            }
        )
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ImageResponseDataInner(BaseModel): + """ + ImageResponseDataInner + """ # noqa: E501 + + index: StrictInt + b64_json: Optional[StrictStr] = None + url: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ImageResponseDataInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ImageResponseDataInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({}) + return _obj diff --git a/src/together/generated/models/images_generations_post_request.py b/src/together/generated/models/images_generations_post_request.py new file mode 100644 index 00000000..9ccc6fe9 --- /dev/null +++ b/src/together/generated/models/images_generations_post_request.py @@ -0,0 +1,217 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Optional, Union +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ImagesGenerationsPostRequest(BaseModel): + """ + ImagesGenerationsPostRequest + """ # noqa: E501 + + prompt: StrictStr = Field( + description="A description of the desired images. Maximum length varies by model." 
+ ) + model: ImagesGenerationsPostRequestModel + steps: Optional[StrictInt] = Field( + default=20, description="Number of generation steps." + ) + image_url: Optional[StrictStr] = Field( + default=None, + description="URL of an image to use for image models that support it.", + ) + seed: Optional[StrictInt] = Field( + default=None, + description="Seed used for generation. Can be used to reproduce image generations.", + ) + n: Optional[StrictInt] = Field( + default=1, description="Number of image results to generate." + ) + height: Optional[StrictInt] = Field( + default=1024, description="Height of the image to generate in number of pixels." + ) + width: Optional[StrictInt] = Field( + default=1024, description="Width of the image to generate in number of pixels." + ) + negative_prompt: Optional[StrictStr] = Field( + default=None, + description="The prompt or prompts not to guide the image generation.", + ) + response_format: Optional[StrictStr] = Field( + default=None, + description="Format of the image response. Can be either a base64 string or a URL.", + ) + guidance: Optional[Union[StrictFloat, StrictInt]] = Field( + default=3.5, + description="Adjusts the alignment of the generated image with the input prompt. Higher values (e.g., 8-10) make the output more faithful to the prompt, while lower values (e.g., 1-5) encourage more creative freedom.", + ) + output_format: Optional[StrictStr] = Field( + default="jpeg", + description="The format of the image response. Can be either be `jpeg` or `png`. 
Defaults to `jpeg`.", + ) + image_loras: Optional[List[ImagesGenerationsPostRequestImageLorasInner]] = Field( + default=None, + description="An array of objects that define LoRAs (Low-Rank Adaptations) to influence the generated image.", + ) + __properties: ClassVar[List[str]] = [ + "prompt", + "model", + "steps", + "image_url", + "seed", + "n", + "height", + "width", + "negative_prompt", + "response_format", + "guidance", + "output_format", + "image_loras", + ] + + @field_validator("response_format") + def response_format_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["base64", "url"]): + raise ValueError("must be one of enum values ('base64', 'url')") + return value + + @field_validator("output_format") + def output_format_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["jpeg", "png"]): + raise ValueError("must be one of enum values ('jpeg', 'png')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ImagesGenerationsPostRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of model + if self.model: + _dict["model"] = self.model.to_dict() + # override the default output from pydantic by calling `to_dict()` of each item in image_loras (list) + _items = [] + if self.image_loras: + for _item_image_loras in self.image_loras: + if _item_image_loras: + _items.append(_item_image_loras.to_dict()) + _dict["image_loras"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ImagesGenerationsPostRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt": obj.get("prompt"), + "model": ( + ImagesGenerationsPostRequestModel.from_dict(obj["model"]) + if obj.get("model") is not None + else None + ), + "steps": obj.get("steps") if obj.get("steps") is not None else 20, + "image_url": obj.get("image_url"), + "seed": obj.get("seed"), + "n": obj.get("n") if obj.get("n") is not None else 1, + "height": obj.get("height") if obj.get("height") is not None else 1024, + "width": obj.get("width") if obj.get("width") is not None else 1024, + "negative_prompt": obj.get("negative_prompt"), + "response_format": obj.get("response_format"), + "guidance": ( + obj.get("guidance") if obj.get("guidance") is not None else 3.5 + ), + "output_format": ( + obj.get("output_format") + if obj.get("output_format") is not None + else "jpeg" + ), + "image_loras": ( + [ + ImagesGenerationsPostRequestImageLorasInner.from_dict(_item) + for 
_item in obj["image_loras"] + ] + if obj.get("image_loras") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/images_generations_post_request_image_loras_inner.py b/src/together/generated/models/images_generations_post_request_image_loras_inner.py new file mode 100644 index 00000000..50f29485 --- /dev/null +++ b/src/together/generated/models/images_generations_post_request_image_loras_inner.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt, StrictStr +from typing import Any, ClassVar, Dict, List, Union +from typing import Optional, Set +from typing_extensions import Self + + +class ImagesGenerationsPostRequestImageLorasInner(BaseModel): + """ + ImagesGenerationsPostRequestImageLorasInner + """ # noqa: E501 + + path: StrictStr = Field( + description="The URL of the LoRA to apply (e.g. https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA)." + ) + scale: Union[StrictFloat, StrictInt] = Field( + description="The strength of the LoRA's influence. Most LoRA's recommend a value of 1." 
+ ) + __properties: ClassVar[List[str]] = ["path", "scale"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ImagesGenerationsPostRequestImageLorasInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ImagesGenerationsPostRequestImageLorasInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"path": obj.get("path"), "scale": obj.get("scale")}) + return _obj diff --git a/src/together/generated/models/images_generations_post_request_model.py b/src/together/generated/models/images_generations_post_request_model.py new file mode 100644 index 00000000..a61ffba7 --- /dev/null +++ b/src/together/generated/models/images_generations_post_request_model.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +IMAGESGENERATIONSPOSTREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class ImagesGenerationsPostRequestModel(BaseModel): + """ + The model to use for image generation.

[See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = ImagesGenerationsPostRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ImagesGenerationsPostRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/linear_lr_scheduler_args.py b/src/together/generated/models/linear_lr_scheduler_args.py new file mode 100644 index 00000000..d84842f1 --- /dev/null +++ b/src/together/generated/models/linear_lr_scheduler_args.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Optional, Union +from typing import Optional, Set +from typing_extensions import Self + + +class LinearLRSchedulerArgs(BaseModel): + """ + LinearLRSchedulerArgs + """ # noqa: E501 + + min_lr_ratio: Optional[Union[StrictFloat, StrictInt]] = Field( + default=0.0, + description="The ratio of the final learning rate to the peak learning rate", + ) + __properties: ClassVar[List[str]] = ["min_lr_ratio"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of LinearLRSchedulerArgs from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of LinearLRSchedulerArgs from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "min_lr_ratio": ( + obj.get("min_lr_ratio") + if obj.get("min_lr_ratio") is not None + else 0.0 + ) + } + ) + return _obj diff --git a/src/together/generated/models/list_endpoint.py b/src/together/generated/models/list_endpoint.py new file mode 100644 index 00000000..417949c3 --- /dev/null +++ b/src/together/generated/models/list_endpoint.py @@ -0,0 +1,136 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class ListEndpoint(BaseModel): + """ + Details about an endpoint when listed via the list endpoint + """ # noqa: E501 + + object: StrictStr = Field(description="The type of object") + id: StrictStr = Field(description="Unique identifier for the endpoint") + name: StrictStr = Field(description="System name for the endpoint") + model: StrictStr = Field(description="The model deployed on this endpoint") + type: StrictStr = Field(description="The type of endpoint") + owner: StrictStr = Field(description="The owner of this endpoint") + state: StrictStr = Field(description="Current state of the endpoint") + created_at: datetime = Field(description="Timestamp when the endpoint was created") + __properties: ClassVar[List[str]] = [ + "object", + "id", + "name", + "model", + "type", + "owner", + "state", + "created_at", + ] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["endpoint"]): + raise ValueError("must be one of enum values ('endpoint')") + return value + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["serverless", "dedicated"]): + raise ValueError("must be one of enum values ('serverless', 'dedicated')") + return value + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value not in set( + ["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"] + ): + raise ValueError( + "must be one of enum values ('PENDING', 'STARTING', 'STARTED', 'STOPPING', 'STOPPED', 'ERROR')" + ) + return value + + model_config = ConfigDict( + 
populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListEndpoint from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListEndpoint from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "name": obj.get("name"), + "model": obj.get("model"), + "type": obj.get("type"), + "owner": obj.get("owner"), + "state": obj.get("state"), + "created_at": obj.get("created_at"), + } + ) + return _obj diff --git a/src/together/generated/models/list_endpoints200_response.py b/src/together/generated/models/list_endpoints200_response.py new file mode 100644 index 00000000..0db32f09 --- /dev/null +++ b/src/together/generated/models/list_endpoints200_response.py @@ -0,0 +1,108 @@ +# coding: utf-8 + +""" + Together 
APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_endpoint import ListEndpoint +from typing import Optional, Set +from typing_extensions import Self + + +class ListEndpoints200Response(BaseModel): + """ + ListEndpoints200Response + """ # noqa: E501 + + object: StrictStr + data: List[ListEndpoint] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListEndpoints200Response from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListEndpoints200Response from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ListEndpoint.from_dict(_item) for _item in obj["data"]] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response.py b/src/together/generated/models/list_hardware200_response.py new file mode 100644 index 00000000..c18ec5f0 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response.py @@ -0,0 +1,185 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import json +import pprint +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Any, List, Optional +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) +from pydantic import StrictStr, Field +from typing import Union, List, Set, Optional, Dict +from typing_extensions import Literal, Self + +LISTHARDWARE200RESPONSE_ONE_OF_SCHEMAS = [ + "ListHardware200ResponseOneOf", + "ListHardware200ResponseOneOf1", +] + + +class ListHardware200Response(BaseModel): + """ + ListHardware200Response + """ + + # data type: ListHardware200ResponseOneOf + oneof_schema_1_validator: Optional[ListHardware200ResponseOneOf] = None + # data type: ListHardware200ResponseOneOf1 + oneof_schema_2_validator: Optional[ListHardware200ResponseOneOf1] = None + actual_instance: Optional[ + Union[ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1] + ] = None + one_of_schemas: Set[str] = { + "ListHardware200ResponseOneOf", + "ListHardware200ResponseOneOf1", + } + + model_config = ConfigDict( + validate_assignment=True, + protected_namespaces=(), + ) + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." 
+ ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_oneof(cls, v): + instance = ListHardware200Response.model_construct() + error_messages = [] + match = 0 + # validate data type: ListHardware200ResponseOneOf + if not isinstance(v, ListHardware200ResponseOneOf): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ListHardware200ResponseOneOf`" + ) + else: + match += 1 + # validate data type: ListHardware200ResponseOneOf1 + if not isinstance(v, ListHardware200ResponseOneOf1): + error_messages.append( + f"Error! Input type `{type(v)}` is not `ListHardware200ResponseOneOf1`" + ) + else: + match += 1 + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when setting `actual_instance` in ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + match = 0 + + # deserialize data into ListHardware200ResponseOneOf + try: + instance.actual_instance = ListHardware200ResponseOneOf.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into ListHardware200ResponseOneOf1 + try: + instance.actual_instance = ListHardware200ResponseOneOf1.from_json(json_str) + match += 1 + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if match > 1: + # more than 1 match + raise ValueError( + "Multiple matches found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. Details: " + + ", ".join(error_messages) + ) + elif match == 0: + # no match + raise ValueError( + "No match found when deserializing the JSON string into ListHardware200Response with oneOf schemas: ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict( + self, + ) -> Optional[ + Union[ + Dict[str, Any], ListHardware200ResponseOneOf, ListHardware200ResponseOneOf1 + ] + ]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + # primitive type + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/list_hardware200_response_one_of.py b/src/together/generated/models/list_hardware200_response_one_of.py new file mode 100644 index 00000000..1cf0ec36 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of.py @@ -0,0 +1,113 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOf(BaseModel): + """ + Response when no model filter is provided + """ # noqa: E501 + + object: StrictStr + data: List[ListHardware200ResponseOneOfDataInner] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ + ListHardware200ResponseOneOfDataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1.py b/src/together/generated/models/list_hardware200_response_one_of1.py new file mode 100644 index 00000000..171532b1 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of1.py @@ -0,0 +1,113 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) +from typing import Optional, Set +from typing_extensions import Self + + +class ListHardware200ResponseOneOf1(BaseModel): + """ + Response when model filter is provided + """ # noqa: E501 + + object: StrictStr + data: List[ListHardware200ResponseOneOf1DataInner] + __properties: ClassVar[List[str]] = ["object", "data"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["list"]): + raise ValueError("must be one of enum values ('list')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1 from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in data (list) + _items = [] + if self.data: + for _item_data in self.data: + if _item_data: + _items.append(_item_data.to_dict()) + _dict["data"] = _items + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ListHardware200ResponseOneOf1 from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "data": ( + [ + ListHardware200ResponseOneOf1DataInner.from_dict(_item) + for _item in obj["data"] + ] + if obj.get("data") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py b/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py new file mode 100644 index 00000000..db5c86a2 --- /dev/null +++ b/src/together/generated/models/list_hardware200_response_one_of1_data_inner.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
class ListHardware200ResponseOneOf1DataInner(BaseModel):
    """A single hardware entry (with availability) from the list-hardware response."""  # noqa: E501

    object: StrictStr
    name: StrictStr = Field(
        description="Unique identifier for the hardware configuration"
    )
    pricing: EndpointPricing
    specs: HardwareSpec
    availability: HardwareAvailability
    updated_at: datetime = Field(
        description="Timestamp of when the hardware status was last updated"
    )
    __properties: ClassVar[List[str]] = [
        "object",
        "name",
        "pricing",
        "specs",
        "availability",
        "updated_at",
    ]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Validates the enum"""
        if value not in {"hardware"}:
            raise ValueError("must be one of enum values ('hardware')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ListHardware200ResponseOneOf1DataInner from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted, and nested models are serialized via
        their own ``to_dict()`` overrides.
        """
        excluded_fields: Set[str] = set()

        payload = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # Route nested models through their own to_dict() so their custom
        # serialization rules apply.
        for attr in ("pricing", "specs", "availability"):
            nested = getattr(self, attr)
            if nested:
                payload[attr] = nested.to_dict()
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ListHardware200ResponseOneOf1DataInner from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        pricing = obj.get("pricing")
        specs = obj.get("specs")
        availability = obj.get("availability")
        return cls.model_validate(
            {
                "object": obj.get("object"),
                "name": obj.get("name"),
                "pricing": (
                    EndpointPricing.from_dict(pricing) if pricing is not None else None
                ),
                "specs": (
                    HardwareSpec.from_dict(specs) if specs is not None else None
                ),
                "availability": (
                    HardwareAvailability.from_dict(availability)
                    if availability is not None
                    else None
                ),
                "updated_at": obj.get("updated_at"),
            }
        )
class ListHardware200ResponseOneOfDataInner(BaseModel):
    """A single hardware entry (availability not populated) from the list-hardware response."""  # noqa: E501

    object: StrictStr
    name: StrictStr = Field(
        description="Unique identifier for the hardware configuration"
    )
    pricing: EndpointPricing
    specs: HardwareSpec
    # Nullable in this response variant: an explicit null is preserved on
    # round-trip (see to_dict()).
    availability: Optional[Any] = None
    updated_at: datetime = Field(
        description="Timestamp of when the hardware status was last updated"
    )
    __properties: ClassVar[List[str]] = [
        "object",
        "name",
        "pricing",
        "specs",
        "availability",
        "updated_at",
    ]

    @field_validator("object")
    def object_validate_enum(cls, value):
        """Validates the enum"""
        if value not in {"hardware"}:
            raise ValueError("must be one of enum values ('hardware')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ListHardware200ResponseOneOfDataInner from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: only nullable
        fields that were explicitly set to ``None`` at init time appear as
        ``None`` in the output; other ``None`` fields are dropped.
        """
        excluded_fields: Set[str] = set()

        payload = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # Route nested models through their own to_dict() overrides.
        if self.pricing:
            payload["pricing"] = self.pricing.to_dict()
        if self.specs:
            payload["specs"] = self.specs.to_dict()
        # availability is nullable: re-emit an explicit null only when the
        # caller actually set it to None at model initialization.
        if self.availability is None and "availability" in self.model_fields_set:
            payload["availability"] = None

        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ListHardware200ResponseOneOfDataInner from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        pricing = obj.get("pricing")
        specs = obj.get("specs")
        return cls.model_validate(
            {
                "object": obj.get("object"),
                "name": obj.get("name"),
                "pricing": (
                    EndpointPricing.from_dict(pricing) if pricing is not None else None
                ),
                "specs": (
                    HardwareSpec.from_dict(specs) if specs is not None else None
                ),
                "availability": obj.get("availability"),
                "updated_at": obj.get("updated_at"),
            }
        )
class LoRATrainingType(BaseModel):
    """LoRA fine-tuning configuration (rank, alpha, dropout, trainable modules)."""  # noqa: E501

    type: StrictStr
    lora_r: StrictInt
    lora_alpha: StrictInt
    lora_dropout: Optional[Union[StrictFloat, StrictInt]] = 0.0
    lora_trainable_modules: Optional[StrictStr] = "all-linear"
    __properties: ClassVar[List[str]] = [
        "type",
        "lora_r",
        "lora_alpha",
        "lora_dropout",
        "lora_trainable_modules",
    ]

    @field_validator("type")
    def type_validate_enum(cls, value):
        """Validates the enum"""
        if value not in {"Lora"}:
            raise ValueError("must be one of enum values ('Lora')")
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of LoRATrainingType from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted unless explicitly set.
        """
        excluded_fields: Set[str] = set()

        return self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of LoRATrainingType from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        # Re-apply field defaults when the keys are absent from the payload.
        dropout = obj.get("lora_dropout")
        trainable = obj.get("lora_trainable_modules")
        return cls.model_validate(
            {
                "type": obj.get("type"),
                "lora_r": obj.get("lora_r"),
                "lora_alpha": obj.get("lora_alpha"),
                "lora_dropout": 0.0 if dropout is None else dropout,
                "lora_trainable_modules": (
                    "all-linear" if trainable is None else trainable
                ),
            }
        )
class LogprobsPart(BaseModel):
    """Per-token log-probability data attached to a completion choice."""  # noqa: E501

    token_ids: Optional[List[Union[StrictFloat, StrictInt]]] = Field(
        default=None, description="List of token IDs corresponding to the logprobs"
    )
    tokens: Optional[List[StrictStr]] = Field(
        default=None, description="List of token strings"
    )
    token_logprobs: Optional[List[Union[StrictFloat, StrictInt]]] = Field(
        default=None, description="List of token log probabilities"
    )
    __properties: ClassVar[List[str]] = ["token_ids", "tokens", "token_logprobs"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of LogprobsPart from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted unless explicitly set.
        """
        excluded_fields: Set[str] = set()

        return self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of LogprobsPart from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        return cls.model_validate(
            {
                "token_ids": obj.get("token_ids"),
                "tokens": obj.get("tokens"),
                "token_logprobs": obj.get("token_logprobs"),
            }
        )
class LRScheduler(BaseModel):
    """
    Learning-rate scheduler configuration for a fine-tuning job.

    `lr_scheduler_type` names the schedule; scheduler-specific options are
    carried as a plain JSON object in `lr_scheduler_args`.
    """  # noqa: E501

    lr_scheduler_type: StrictStr
    # Plain dict per the OpenAPI schema; serialized/deserialized as-is below.
    lr_scheduler_args: Optional[Dict[str, Any]] = None
    __properties: ClassVar[List[str]] = ["lr_scheduler_type", "lr_scheduler_args"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of LRScheduler from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set()

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # BUGFIX: the generated code called `self.lr_scheduler_args.to_dict()`
        # here, but the field is annotated `Optional[Dict[str, Any]]` and a
        # plain dict has no `to_dict()` — any non-None value raised
        # AttributeError. model_dump() already serialized the dict correctly,
        # so no override is needed.
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of LRScheduler from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        # BUGFIX: the generated code called
        # `LinearLRSchedulerArgs.from_dict(obj["lr_scheduler_args"])`, but
        # `LinearLRSchedulerArgs` is never imported in this module, so any
        # payload with scheduler args raised NameError. The field is a plain
        # dict, so it is passed through unchanged.
        # NOTE(review): the generator may have intended the field to be typed
        # as a LinearLRSchedulerArgs model — confirm against the OpenAPI spec.
        _obj = cls.model_validate(
            {
                "lr_scheduler_type": obj.get("lr_scheduler_type"),
                "lr_scheduler_args": obj.get("lr_scheduler_args"),
            }
        )
        return _obj
class ModelInfo(BaseModel):
    """Metadata describing a model available through the Together API."""  # noqa: E501

    id: StrictStr
    object: StrictStr
    created: StrictInt
    type: StrictStr
    display_name: Optional[StrictStr] = None
    organization: Optional[StrictStr] = None
    link: Optional[StrictStr] = None
    license: Optional[StrictStr] = None
    context_length: Optional[StrictInt] = None
    pricing: Optional[Pricing] = None
    __properties: ClassVar[List[str]] = [
        "id",
        "object",
        "created",
        "type",
        "display_name",
        "organization",
        "link",
        "license",
        "context_length",
        "pricing",
    ]

    @field_validator("type")
    def type_validate_enum(cls, value):
        """Validates the enum"""
        allowed = {
            "chat",
            "language",
            "code",
            "image",
            "embedding",
            "moderation",
            "rerank",
        }
        if value not in allowed:
            raise ValueError(
                "must be one of enum values ('chat', 'language', 'code', 'image', 'embedding', 'moderation', 'rerank')"
            )
        return value

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ModelInfo from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted, and nested models are serialized via
        their own ``to_dict()`` overrides.
        """
        excluded_fields: Set[str] = set()

        payload = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # Route the nested pricing model through its own to_dict().
        if self.pricing:
            payload["pricing"] = self.pricing.to_dict()
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ModelInfo from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        pricing = obj.get("pricing")
        return cls.model_validate(
            {
                "id": obj.get("id"),
                "object": obj.get("object"),
                "created": obj.get("created"),
                "type": obj.get("type"),
                "display_name": obj.get("display_name"),
                "organization": obj.get("organization"),
                "link": obj.get("link"),
                "license": obj.get("license"),
                "context_length": obj.get("context_length"),
                "pricing": (
                    Pricing.from_dict(pricing) if pricing is not None else None
                ),
            }
        )
class Pricing(BaseModel):
    """Per-model pricing figures (hourly, per-token, base and fine-tune rates)."""  # noqa: E501

    hourly: Union[StrictFloat, StrictInt]
    input: Union[StrictFloat, StrictInt]
    output: Union[StrictFloat, StrictInt]
    base: Union[StrictFloat, StrictInt]
    finetune: Union[StrictFloat, StrictInt]
    __properties: ClassVar[List[str]] = [
        "hourly",
        "input",
        "output",
        "base",
        "finetune",
    ]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of Pricing from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted unless explicitly set.
        """
        excluded_fields: Set[str] = set()

        return self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of Pricing from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        return cls.model_validate(
            {key: obj.get(key) for key in ("hourly", "input", "output", "base", "finetune")}
        )
class PromptPartInner(BaseModel):
    """One echoed prompt segment, optionally with its log-probability data."""  # noqa: E501

    text: Optional[StrictStr] = None
    logprobs: Optional[LogprobsPart] = None
    __properties: ClassVar[List[str]] = ["text", "logprobs"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of PromptPartInner from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: fields whose
        value is ``None`` are omitted, and nested models are serialized via
        their own ``to_dict()`` overrides.
        """
        excluded_fields: Set[str] = set()

        payload = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # Route the nested logprobs model through its own to_dict().
        if self.logprobs:
            payload["logprobs"] = self.logprobs.to_dict()
        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of PromptPartInner from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        logprobs = obj.get("logprobs")
        return cls.model_validate(
            {
                "text": obj.get("text"),
                "logprobs": (
                    LogprobsPart.from_dict(logprobs) if logprobs is not None else None
                ),
            }
        )
class RerankRequest(BaseModel):
    """Request payload for the rerank endpoint; unknown keys are preserved."""  # noqa: E501

    model: RerankRequestModel
    query: StrictStr = Field(description="The search query to be used for ranking.")
    documents: RerankRequestDocuments
    top_n: Optional[StrictInt] = Field(
        default=None, description="The number of top results to return."
    )
    return_documents: Optional[StrictBool] = Field(
        default=None,
        description="Whether to return supplied documents with the response.",
    )
    rank_fields: Optional[List[StrictStr]] = Field(
        default=None,
        description="List of keys in the JSON Object document to rank by. Defaults to use all supplied keys for ranking.",
    )
    # Catch-all for keys not declared in __properties (round-tripped verbatim).
    additional_properties: Dict[str, Any] = {}
    __properties: ClassVar[List[str]] = [
        "model",
        "query",
        "documents",
        "top_n",
        "return_documents",
        "rank_fields",
    ]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Return the string representation of the model using alias."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Return the JSON representation of the model using alias."""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of RerankRequest from a JSON string."""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the model using alias.

        Differs from pydantic's ``model_dump(by_alias=True)``: ``None``
        fields are omitted unless explicitly set, nested models use their
        own ``to_dict()``, and entries from ``additional_properties`` are
        hoisted to the top level.
        """
        excluded_fields: Set[str] = {"additional_properties"}

        payload = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        # Route nested models through their own to_dict() overrides.
        if self.model:
            payload["model"] = self.model.to_dict()
        if self.documents:
            payload["documents"] = self.documents.to_dict()
        # Hoist pass-through keys to the top level of the payload.
        if self.additional_properties is not None:
            payload.update(self.additional_properties)

        return payload

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of RerankRequest from a dict."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        model = obj.get("model")
        documents = obj.get("documents")
        instance = cls.model_validate(
            {
                "model": (
                    RerankRequestModel.from_dict(model) if model is not None else None
                ),
                "query": obj.get("query"),
                "documents": (
                    RerankRequestDocuments.from_dict(documents)
                    if documents is not None
                    else None
                ),
                "top_n": obj.get("top_n"),
                "return_documents": obj.get("return_documents"),
                "rank_fields": obj.get("rank_fields"),
            }
        )
        # Preserve any keys not declared in __properties.
        for key in obj.keys():
            if key not in cls.__properties:
                instance.additional_properties[key] = obj.get(key)

        return instance
# Names of the two oneOf alternatives this wrapper can hold.
RERANKREQUESTDOCUMENTS_ONE_OF_SCHEMAS = ["List[Dict[str, object]]", "List[str]"]


class RerankRequestDocuments(BaseModel):
    """
    List of documents, which can be either strings or objects.
    """

    # OpenAPI oneOf wrapper: exactly one of the two validators below must
    # accept the value held in `actual_instance`.
    # data type: List[Dict[str, object]]
    oneof_schema_1_validator: Optional[List[Dict[str, Any]]] = None
    # data type: List[str]
    oneof_schema_2_validator: Optional[List[StrictStr]] = None
    # The value actually carried by this wrapper (one of the two list shapes).
    actual_instance: Optional[Union[List[Dict[str, object]], List[str]]] = None
    # Schema names, mirroring RERANKREQUESTDOCUMENTS_ONE_OF_SCHEMAS.
    one_of_schemas: Set[str] = {"List[Dict[str, object]]", "List[str]"}

    model_config = ConfigDict(
        validate_assignment=True,
        protected_namespaces=(),
    )

    def __init__(self, *args, **kwargs) -> None:
        """Accept either a single positional value for `actual_instance` or
        keyword arguments, but never both."""
        if args:
            if len(args) > 1:
                raise ValueError(
                    "If a position argument is used, only 1 is allowed to set `actual_instance`"
                )
            if kwargs:
                raise ValueError(
                    "If a position argument is used, keyword arguments cannot be used."
                )
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)

    @field_validator("actual_instance")
    def actual_instance_must_validate_oneof(cls, v):
        # Try the value against each oneOf schema via assignment (the model
        # has validate_assignment=True, so assigning to a *_validator field
        # runs pydantic validation); exactly one schema must accept it.
        instance = RerankRequestDocuments.model_construct()
        error_messages = []
        match = 0
        # validate data type: List[Dict[str, object]]
        try:
            instance.oneof_schema_1_validator = v
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # validate data type: List[str]
        try:
            instance.oneof_schema_2_validator = v
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when setting `actual_instance` in RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: "
                + ", ".join(error_messages)
            )
        else:
            return v

    @classmethod
    def from_dict(cls, obj: Union[str, Dict[str, Any]]) -> Self:
        # Round-trips through JSON so the same oneOf matching logic applies.
        return cls.from_json(json.dumps(obj))

    @classmethod
    def from_json(cls, json_str: str) -> Self:
        """Returns the object represented by the json string"""
        instance = cls.model_construct()
        error_messages = []
        match = 0

        # deserialize data into List[Dict[str, object]]
        try:
            # validation
            instance.oneof_schema_1_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.oneof_schema_1_validator
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into List[str]
        try:
            # validation
            instance.oneof_schema_2_validator = json.loads(json_str)
            # assign value to actual_instance
            instance.actual_instance = instance.oneof_schema_2_validator
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))

        if match > 1:
            # more than 1 match
            raise ValueError(
                "Multiple matches found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: "
                + ", ".join(error_messages)
            )
        elif match == 0:
            # no match
            raise ValueError(
                "No match found when deserializing the JSON string into RerankRequestDocuments with oneOf schemas: List[Dict[str, object]], List[str]. Details: "
                + ", ".join(error_messages)
            )
        else:
            return instance

    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"

        # Delegate to the instance's own serializer when it has one;
        # otherwise fall back to plain JSON encoding.
        if hasattr(self.actual_instance, "to_json") and callable(
            self.actual_instance.to_json
        ):
            return self.actual_instance.to_json()
        else:
            return json.dumps(self.actual_instance)

    def to_dict(
        self,
    ) -> Optional[Union[Dict[str, Any], List[Dict[str, object]], List[str]]]:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None

        # Delegate to the instance's own to_dict() when it has one.
        if hasattr(self.actual_instance, "to_dict") and callable(
            self.actual_instance.to_dict
        ):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance

    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.model_dump())
+""" # noqa: E501 + + +from __future__ import annotations +from inspect import getfullargspec +import json +import pprint +import re # noqa: F401 +from pydantic import ( + BaseModel, + ConfigDict, + Field, + StrictStr, + ValidationError, + field_validator, +) +from typing import Optional +from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict +from typing_extensions import Literal, Self +from pydantic import Field + +RERANKREQUESTMODEL_ANY_OF_SCHEMAS = ["str"] + + +class RerankRequestModel(BaseModel): + """ + The model to be used for the rerank request.

[See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models) + """ + + # data type: str + anyof_schema_1_validator: Optional[StrictStr] = None + # data type: str + anyof_schema_2_validator: Optional[StrictStr] = None + if TYPE_CHECKING: + actual_instance: Optional[Union[str]] = None + else: + actual_instance: Any = None + any_of_schemas: Set[str] = {"str"} + + model_config = { + "validate_assignment": True, + "protected_namespaces": (), + } + + def __init__(self, *args, **kwargs) -> None: + if args: + if len(args) > 1: + raise ValueError( + "If a position argument is used, only 1 is allowed to set `actual_instance`" + ) + if kwargs: + raise ValueError( + "If a position argument is used, keyword arguments cannot be used." + ) + super().__init__(actual_instance=args[0]) + else: + super().__init__(**kwargs) + + @field_validator("actual_instance") + def actual_instance_must_validate_anyof(cls, v): + instance = RerankRequestModel.model_construct() + error_messages = [] + # validate data type: str + try: + instance.anyof_schema_1_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # validate data type: str + try: + instance.anyof_schema_2_validator = v + return v + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + if error_messages: + # no match + raise ValueError( + "No match found when setting the actual_instance in RerankRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return v + + @classmethod + def from_dict(cls, obj: Dict[str, Any]) -> Self: + return cls.from_json(json.dumps(obj)) + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Returns the object represented by the json string""" + instance = cls.model_construct() + error_messages = [] + # deserialize data into str + try: + # validation + instance.anyof_schema_1_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_1_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + # deserialize data into str + try: + # validation + instance.anyof_schema_2_validator = json.loads(json_str) + # assign value to actual_instance + instance.actual_instance = instance.anyof_schema_2_validator + return instance + except (ValidationError, ValueError) as e: + error_messages.append(str(e)) + + if error_messages: + # no match + raise ValueError( + "No match found when deserializing the JSON string into RerankRequestModel with anyOf schemas: str. 
Details: " + + ", ".join(error_messages) + ) + else: + return instance + + def to_json(self) -> str: + """Returns the JSON representation of the actual instance""" + if self.actual_instance is None: + return "null" + + if hasattr(self.actual_instance, "to_json") and callable( + self.actual_instance.to_json + ): + return self.actual_instance.to_json() + else: + return json.dumps(self.actual_instance) + + def to_dict(self) -> Optional[Union[Dict[str, Any], str]]: + """Returns the dict representation of the actual instance""" + if self.actual_instance is None: + return None + + if hasattr(self.actual_instance, "to_dict") and callable( + self.actual_instance.to_dict + ): + return self.actual_instance.to_dict() + else: + return self.actual_instance + + def to_str(self) -> str: + """Returns the string representation of the actual instance""" + return pprint.pformat(self.model_dump()) diff --git a/src/together/generated/models/rerank_response.py b/src/together/generated/models/rerank_response.py new file mode 100644 index 00000000..edfb2fd3 --- /dev/null +++ b/src/together/generated/models/rerank_response.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) +from together.generated.models.usage_data import UsageData +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponse(BaseModel): + """ + RerankResponse + """ # noqa: E501 + + object: StrictStr = Field(description="Object type") + id: Optional[StrictStr] = Field(default=None, description="Request ID") + model: StrictStr = Field(description="The model to be used for the rerank request.") + results: List[RerankResponseResultsInner] + usage: Optional[UsageData] = None + __properties: ClassVar[List[str]] = ["object", "id", "model", "results", "usage"] + + @field_validator("object") + def object_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["rerank"]): + raise ValueError("must be one of enum values ('rerank')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of each item in results (list) + _items = [] + if self.results: + for _item_results in self.results: + if _item_results: + _items.append(_item_results.to_dict()) + _dict["results"] = _items + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "object": obj.get("object"), + "id": obj.get("id"), + "model": obj.get("model"), + "results": ( + [ + RerankResponseResultsInner.from_dict(_item) + for _item in obj["results"] + ] + if obj.get("results") is not None + else None + ), + "usage": ( + UsageData.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/rerank_response_results_inner.py b/src/together/generated/models/rerank_response_results_inner.py new file mode 100644 index 00000000..51610442 --- /dev/null +++ b/src/together/generated/models/rerank_response_results_inner.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing import Any, ClassVar, Dict, List, Union +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponseResultsInner(BaseModel): + """ + RerankResponseResultsInner + """ # noqa: E501 + + index: StrictInt + relevance_score: Union[StrictFloat, StrictInt] + document: RerankResponseResultsInnerDocument + __properties: ClassVar[List[str]] = ["index", "relevance_score", "document"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponseResultsInner from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of document + if self.document: + _dict["document"] = self.document.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponseResultsInner from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "relevance_score": obj.get("relevance_score"), + "document": ( + RerankResponseResultsInnerDocument.from_dict(obj["document"]) + if obj.get("document") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/rerank_response_results_inner_document.py b/src/together/generated/models/rerank_response_results_inner_document.py new file mode 100644 index 00000000..51258d5a --- /dev/null +++ b/src/together/generated/models/rerank_response_results_inner_document.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class RerankResponseResultsInnerDocument(BaseModel): + """ + RerankResponseResultsInnerDocument + """ # noqa: E501 + + text: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["text"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RerankResponseResultsInnerDocument from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RerankResponseResultsInnerDocument from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"text": obj.get("text")}) + return _obj diff --git a/src/together/generated/models/stream_sentinel.py b/src/together/generated/models/stream_sentinel.py new file mode 100644 index 00000000..54c925d4 --- /dev/null +++ b/src/together/generated/models/stream_sentinel.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class StreamSentinel(BaseModel): + """ + StreamSentinel + """ # noqa: E501 + + data: StrictStr + __properties: ClassVar[List[str]] = ["data"] + + @field_validator("data") + def data_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["[DONE]"]): + raise ValueError("must be one of enum values ('[DONE]')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of StreamSentinel from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of StreamSentinel from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"data": obj.get("data")}) + return _obj diff --git a/src/together/generated/models/tool_choice.py b/src/together/generated/models/tool_choice.py new file mode 100644 index 00000000..2571dcf9 --- /dev/null +++ b/src/together/generated/models/tool_choice.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import ( + BaseModel, + ConfigDict, + StrictFloat, + StrictInt, + StrictStr, + field_validator, +) +from typing import Any, ClassVar, Dict, List, Union +from together.generated.models.tool_choice_function import ToolChoiceFunction +from typing import Optional, Set +from typing_extensions import Self + + +class ToolChoice(BaseModel): + """ + ToolChoice + """ # noqa: E501 + + index: Union[StrictFloat, StrictInt] + id: StrictStr + type: StrictStr + function: ToolChoiceFunction + __properties: ClassVar[List[str]] = ["index", "id", "type", "function"] + + @field_validator("type") + def type_validate_enum(cls, value): + """Validates the enum""" + if value not in set(["function"]): + raise ValueError("must be one of enum values ('function')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) 
-> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolChoice from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolChoice from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "index": obj.get("index"), + "id": obj.get("id"), + "type": obj.get("type"), + "function": ( + ToolChoiceFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/tool_choice_function.py b/src/together/generated/models/tool_choice_function.py new file mode 100644 index 00000000..308cbe71 --- /dev/null +++ b/src/together/generated/models/tool_choice_function.py @@ -0,0 +1,86 @@ +# coding: utf-8 + +""" + Together APIs + + The 
class ToolChoiceFunction(BaseModel):
    """Function name plus serialized arguments for a tool call (generated model)."""  # noqa: E501

    name: StrictStr
    arguments: StrictStr
    __properties: ClassVar[List[str]] = ["name", "arguments"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )

    def to_str(self) -> str:
        """Pretty-printed, alias-keyed representation of the model."""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """JSON string form of the model, keyed by field aliases."""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of ToolChoiceFunction from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Alias-keyed dict form of the model; unset ``None`` fields are dropped."""
        skip: Set[str] = set()
        return self.model_dump(
            by_alias=True,
            exclude=skip,
            exclude_none=True,
        )

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of ToolChoiceFunction from a dict"""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return cls.model_validate(obj)
        return cls.model_validate(
            {"name": obj.get("name"), "arguments": obj.get("arguments")}
        )
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.tools_part_function import ToolsPartFunction +from typing import Optional, Set +from typing_extensions import Self + + +class ToolsPart(BaseModel): + """ + ToolsPart + """ # noqa: E501 + + type: Optional[StrictStr] = None + function: Optional[ToolsPartFunction] = None + __properties: ClassVar[List[str]] = ["type", "function"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolsPart from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of function + if self.function: + _dict["function"] = self.function.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolsPart from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "type": obj.get("type"), + "function": ( + ToolsPartFunction.from_dict(obj["function"]) + if obj.get("function") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/tools_part_function.py b/src/together/generated/models/tools_part_function.py new file mode 100644 index 00000000..cbb5d419 --- /dev/null +++ b/src/together/generated/models/tools_part_function.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing import Any, ClassVar, Dict, List, Optional +from typing import Optional, Set +from typing_extensions import Self + + +class ToolsPartFunction(BaseModel): + """ + ToolsPartFunction + """ # noqa: E501 + + description: Optional[StrictStr] = None + name: Optional[StrictStr] = None + parameters: Optional[Dict[str, Any]] = Field( + default=None, description="A map of parameter names to their values." 
+ ) + __properties: ClassVar[List[str]] = ["description", "name", "parameters"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of ToolsPartFunction from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of ToolsPartFunction from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "description": obj.get("description"), + "name": obj.get("name"), + "parameters": obj.get("parameters"), + } + ) + return _obj diff --git a/src/together/generated/models/update_endpoint_request.py b/src/together/generated/models/update_endpoint_request.py new file mode 100644 index 00000000..ee2d2ff0 --- /dev/null +++ b/src/together/generated/models/update_endpoint_request.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator +from typing import Any, ClassVar, Dict, List, Optional +from together.generated.models.autoscaling import Autoscaling +from typing import Optional, Set +from typing_extensions import Self + + +class UpdateEndpointRequest(BaseModel): + """ + UpdateEndpointRequest + """ # noqa: E501 + + display_name: Optional[StrictStr] = Field( + default=None, description="A human-readable name for the endpoint" + ) + state: Optional[StrictStr] = Field( + default=None, description="The desired state of the endpoint" + ) + autoscaling: Optional[Autoscaling] = Field( + default=None, description="New autoscaling configuration for the endpoint" + ) + __properties: ClassVar[List[str]] = ["display_name", "state", "autoscaling"] + + @field_validator("state") + def state_validate_enum(cls, value): + """Validates the enum""" + if value is None: + return value + + if value not in set(["STARTED", "STOPPED"]): + raise ValueError("must be one of enum values ('STARTED', 'STOPPED')") + return value + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of UpdateEndpointRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of autoscaling + if self.autoscaling: + _dict["autoscaling"] = self.autoscaling.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of UpdateEndpointRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "display_name": obj.get("display_name"), + "state": obj.get("state"), + "autoscaling": ( + Autoscaling.from_dict(obj["autoscaling"]) + if obj.get("autoscaling") is not None + else None + ), + } + ) + return _obj diff --git a/src/together/generated/models/usage_data.py b/src/together/generated/models/usage_data.py new file mode 100644 index 00000000..82a825c7 --- /dev/null +++ b/src/together/generated/models/usage_data.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing import Any, ClassVar, Dict, List +from typing import Optional, Set +from typing_extensions import Self + + +class UsageData(BaseModel): + """ + UsageData + """ # noqa: E501 + + prompt_tokens: StrictInt + completion_tokens: StrictInt + total_tokens: StrictInt + __properties: ClassVar[List[str]] = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of UsageData from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of UsageData from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "prompt_tokens": obj.get("prompt_tokens"), + "completion_tokens": obj.get("completion_tokens"), + "total_tokens": obj.get("total_tokens"), + } + ) + return _obj diff --git a/src/together/generated/rest.py b/src/together/generated/rest.py new file mode 100644 index 00000000..0f92a615 --- /dev/null +++ b/src/together/generated/rest.py @@ -0,0 +1,195 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import io +import json +import re +import ssl +from typing import Optional, Union + +import aiohttp +import aiohttp_retry + +from together.generated.exceptions import ApiException, ApiValueError + +RESTResponseType = aiohttp.ClientResponse + +ALLOW_RETRY_METHODS = frozenset({"DELETE", "GET", "HEAD", "OPTIONS", "PUT", "TRACE"}) + + +class RESTResponse(io.IOBase): + + def __init__(self, resp) -> None: + self.response = resp + self.status = resp.status + self.reason = resp.reason + self.data = None + + async def read(self): + if self.data is None: + self.data = await self.response.read() + return self.data + + def getheaders(self): + """Returns a CIMultiDictProxy of the response headers.""" + return self.response.headers + + def getheader(self, name, default=None): + """Returns a given response header.""" + return self.response.headers.get(name, default) + + +class RESTClientObject: + + def __init__(self, configuration) -> None: + + # maxsize is number of requests to host that are allowed in parallel + self.maxsize = configuration.connection_pool_maxsize + + self.ssl_context = ssl.create_default_context(cafile=configuration.ssl_ca_cert) + if configuration.cert_file: + self.ssl_context.load_cert_chain( + configuration.cert_file, keyfile=configuration.key_file + ) + + if not configuration.verify_ssl: + self.ssl_context.check_hostname = False + self.ssl_context.verify_mode = ssl.CERT_NONE + + self.proxy = configuration.proxy + self.proxy_headers = configuration.proxy_headers + + self.retries = configuration.retries + + self.pool_manager: Optional[aiohttp.ClientSession] = None + self.retry_client: Optional[aiohttp_retry.RetryClient] = None + + async def close(self) -> None: + if self.pool_manager: + await self.pool_manager.close() + if self.retry_client is not None: + await self.retry_client.close() + + async def request( + self, + method, + url, + headers=None, + body=None, + post_params=None, + _request_timeout=None, + ): + """Execute request + + 
:param method: http request method + :param url: http request url + :param headers: http request headers + :param body: request json body, for `application/json` + :param post_params: request post parameters, + `application/x-www-form-urlencoded` + and `multipart/form-data` + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + """ + method = method.upper() + assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"] + + if post_params and body: + raise ApiValueError( + "body parameter cannot be used with post_params parameter." + ) + + post_params = post_params or {} + headers = headers or {} + # url already contains the URL query string + timeout = _request_timeout or 5 * 60 + + if "Content-Type" not in headers: + headers["Content-Type"] = "application/json" + + args = {"method": method, "url": url, "timeout": timeout, "headers": headers} + + if self.proxy: + args["proxy"] = self.proxy + if self.proxy_headers: + args["proxy_headers"] = self.proxy_headers + + # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` + if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]: + if re.search("json", headers["Content-Type"], re.IGNORECASE): + if body is not None: + body = json.dumps(body) + args["data"] = body + elif headers["Content-Type"] == "application/x-www-form-urlencoded": + args["data"] = aiohttp.FormData(post_params) + elif headers["Content-Type"] == "multipart/form-data": + # must del headers['Content-Type'], or the correct + # Content-Type which generated by aiohttp + del headers["Content-Type"] + data = aiohttp.FormData() + for param in post_params: + k, v = param + if isinstance(v, tuple) and len(v) == 3: + data.add_field(k, value=v[1], filename=v[0], content_type=v[2]) + else: + # Ensures that dict objects are serialized + if isinstance(v, dict): + v = json.dumps(v) + elif isinstance(v, int): + v = str(v) + 
data.add_field(k, v) + args["data"] = data + + # Pass a `bytes` or `str` parameter directly in the body to support + # other content types than Json when `body` argument is provided + # in serialized form + elif isinstance(body, str) or isinstance(body, bytes): + args["data"] = body + else: + # Cannot generate the request from given parameters + msg = """Cannot prepare a request message for provided + arguments. Please check that your arguments match + declared content type.""" + raise ApiException(status=0, reason=msg) + + pool_manager: Union[aiohttp.ClientSession, aiohttp_retry.RetryClient] + + # https pool manager + if self.pool_manager is None: + self.pool_manager = aiohttp.ClientSession( + connector=aiohttp.TCPConnector( + limit=self.maxsize, ssl=self.ssl_context + ), + trust_env=True, + ) + pool_manager = self.pool_manager + + if self.retries is not None and method in ALLOW_RETRY_METHODS: + if self.retry_client is None: + self.retry_client = aiohttp_retry.RetryClient( + client_session=self.pool_manager, + retry_options=aiohttp_retry.ExponentialRetry( + attempts=self.retries, + factor=2.0, + start_timeout=0.1, + max_timeout=120.0, + ), + ) + pool_manager = self.retry_client + + r = await pool_manager.request(**args) + + return RESTResponse(r) diff --git a/src/together/generated/test/__init__.py b/src/together/generated/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/together/generated/test/test_audio_api.py b/src/together/generated/test/test_audio_api.py new file mode 100644 index 00000000..deddc486 --- /dev/null +++ b/src/together/generated/test/test_audio_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.audio_api import AudioApi + + +class TestAudioApi(unittest.IsolatedAsyncioTestCase): + """AudioApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = AudioApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_audio_speech(self) -> None: + """Test case for audio_speech + + Create audio generation request + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request.py b/src/together/generated/test/test_audio_speech_request.py new file mode 100644 index 00000000..43362fcd --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request import AudioSpeechRequest + + +class TestAudioSpeechRequest(unittest.TestCase): + """AudioSpeechRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequest: + """Test AudioSpeechRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequest` + """ + model = AudioSpeechRequest() + if include_optional: + return AudioSpeechRequest( + model = cartesia/sonic, + input = '', + voice = None, + response_format = 'wav', + language = 'en', + response_encoding = 'pcm_f32le', + sample_rate = 1.337, + stream = True + ) + else: + return AudioSpeechRequest( + model = cartesia/sonic, + input = '', + voice = None, + ) + """ + + def testAudioSpeechRequest(self): + """Test AudioSpeechRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_model.py b/src/together/generated/test/test_audio_speech_request_model.py new file mode 100644 index 00000000..beb8ec7c --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request_model import AudioSpeechRequestModel + + +class TestAudioSpeechRequestModel(unittest.TestCase): + """AudioSpeechRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequestModel: + """Test AudioSpeechRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequestModel` + """ + model = AudioSpeechRequestModel() + if include_optional: + return AudioSpeechRequestModel( + ) + else: + return AudioSpeechRequestModel( + ) + """ + + def testAudioSpeechRequestModel(self): + """Test AudioSpeechRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_request_voice.py b/src/together/generated/test/test_audio_speech_request_voice.py new file mode 100644 index 00000000..744d89c5 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_request_voice.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_request_voice import AudioSpeechRequestVoice + + +class TestAudioSpeechRequestVoice(unittest.TestCase): + """AudioSpeechRequestVoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechRequestVoice: + """Test AudioSpeechRequestVoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechRequestVoice` + """ + model = AudioSpeechRequestVoice() + if include_optional: + return AudioSpeechRequestVoice( + ) + else: + return AudioSpeechRequestVoice( + ) + """ + + def testAudioSpeechRequestVoice(self): + """Test AudioSpeechRequestVoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_chunk.py b/src/together/generated/test/test_audio_speech_stream_chunk.py new file mode 100644 index 00000000..1335b885 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_chunk.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_chunk import AudioSpeechStreamChunk + + +class TestAudioSpeechStreamChunk(unittest.TestCase): + """AudioSpeechStreamChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamChunk: + """Test AudioSpeechStreamChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamChunk` + """ + model = AudioSpeechStreamChunk() + if include_optional: + return AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '' + ) + else: + return AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', + ) + """ + + def testAudioSpeechStreamChunk(self): + """Test AudioSpeechStreamChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_event.py b/src/together/generated/test/test_audio_speech_stream_event.py new file mode 100644 index 00000000..68337a10 --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_event.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_event import AudioSpeechStreamEvent + + +class TestAudioSpeechStreamEvent(unittest.TestCase): + """AudioSpeechStreamEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamEvent: + """Test AudioSpeechStreamEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamEvent` + """ + model = AudioSpeechStreamEvent() + if include_optional: + return AudioSpeechStreamEvent( + data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', ) + ) + else: + return AudioSpeechStreamEvent( + data = together.generated.models.audio_speech_stream_chunk.AudioSpeechStreamChunk( + object = 'audio.tts.chunk', + model = 'cartesia/sonic', + b64 = '', ), + ) + """ + + def testAudioSpeechStreamEvent(self): + """Test AudioSpeechStreamEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_audio_speech_stream_response.py b/src/together/generated/test/test_audio_speech_stream_response.py new file mode 100644 index 00000000..e9e245bc --- /dev/null +++ b/src/together/generated/test/test_audio_speech_stream_response.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.audio_speech_stream_response import ( + AudioSpeechStreamResponse, +) + + +class TestAudioSpeechStreamResponse(unittest.TestCase): + """AudioSpeechStreamResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> AudioSpeechStreamResponse: + """Test AudioSpeechStreamResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `AudioSpeechStreamResponse` + """ + model = AudioSpeechStreamResponse() + if include_optional: + return AudioSpeechStreamResponse( + data = '[DONE]' + ) + else: + return AudioSpeechStreamResponse( + data = '[DONE]', + ) + """ + + def testAudioSpeechStreamResponse(self): + """Test AudioSpeechStreamResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_autoscaling.py b/src/together/generated/test/test_autoscaling.py new file mode 100644 index 00000000..4f8dff60 --- /dev/null +++ b/src/together/generated/test/test_autoscaling.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.autoscaling import Autoscaling + + +class TestAutoscaling(unittest.TestCase): + """Autoscaling unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> Autoscaling: + """Test Autoscaling + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `Autoscaling` + """ + model = Autoscaling() + if include_optional: + return Autoscaling( + min_replicas = 56, + max_replicas = 56 + ) + else: + return Autoscaling( + min_replicas = 56, + max_replicas = 56, + ) + """ + + def testAutoscaling(self): + """Test Autoscaling""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_api.py b/src/together/generated/test/test_chat_api.py new file mode 100644 index 00000000..55bb7cba --- /dev/null +++ b/src/together/generated/test/test_chat_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.chat_api import ChatApi + + +class TestChatApi(unittest.IsolatedAsyncioTestCase): + """ChatApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ChatApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_chat_completions(self) -> None: + """Test case for chat_completions + + Create chat completion + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_assistant_message_param.py b/src/together/generated/test/test_chat_completion_assistant_message_param.py new file mode 100644 index 00000000..072b5db2 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_assistant_message_param.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_assistant_message_param import ( + ChatCompletionAssistantMessageParam, +) + + +class TestChatCompletionAssistantMessageParam(unittest.TestCase): + """ChatCompletionAssistantMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionAssistantMessageParam: + """Test ChatCompletionAssistantMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionAssistantMessageParam` + """ + model = ChatCompletionAssistantMessageParam() + if include_optional: + return ChatCompletionAssistantMessageParam( + content = '', + role = 'assistant', + name = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionAssistantMessageParam( + role = 'assistant', + ) + """ + + def testChatCompletionAssistantMessageParam(self): + """Test ChatCompletionAssistantMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice.py b/src/together/generated/test/test_chat_completion_choice.py new file mode 100644 index 00000000..9618c968 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together 
REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice import ChatCompletionChoice + + +class TestChatCompletionChoice(unittest.TestCase): + """ChatCompletionChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoice: + """Test ChatCompletionChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoice` + """ + model = ChatCompletionChoice() + if include_optional: + return ChatCompletionChoice( + index = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ) + ) + else: + return ChatCompletionChoice( + index = 56, + finish_reason = 'stop', + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 
'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), + ) + """ + + def testChatCompletionChoice(self): + """Test ChatCompletionChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta.py b/src/together/generated/test/test_chat_completion_choice_delta.py new file mode 100644 index 00000000..6e430d9e --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice_delta.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice_delta import ( + ChatCompletionChoiceDelta, +) + + +class TestChatCompletionChoiceDelta(unittest.TestCase): + """ChatCompletionChoiceDelta unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoiceDelta: + """Test ChatCompletionChoiceDelta + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoiceDelta` + """ + model = ChatCompletionChoiceDelta() + if include_optional: + return ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionChoiceDelta( + role = 'system', + ) + """ + + def testChatCompletionChoiceDelta(self): + """Test ChatCompletionChoiceDelta""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choice_delta_function_call.py b/src/together/generated/test/test_chat_completion_choice_delta_function_call.py new file mode 100644 index 00000000..0797b639 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choice_delta_function_call.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choice_delta_function_call import ( + ChatCompletionChoiceDeltaFunctionCall, +) + + +class TestChatCompletionChoiceDeltaFunctionCall(unittest.TestCase): + """ChatCompletionChoiceDeltaFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoiceDeltaFunctionCall: + """Test ChatCompletionChoiceDeltaFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoiceDeltaFunctionCall` + """ + model = ChatCompletionChoiceDeltaFunctionCall() + if include_optional: + return ChatCompletionChoiceDeltaFunctionCall( + arguments = '', + name = '' + ) + else: + return ChatCompletionChoiceDeltaFunctionCall( + arguments = '', + name = '', + ) + """ + + def testChatCompletionChoiceDeltaFunctionCall(self): + """Test ChatCompletionChoiceDeltaFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner.py b/src/together/generated/test/test_chat_completion_choices_data_inner.py new file mode 100644 index 00000000..d61c850b --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choices_data_inner.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choices_data_inner import ( + ChatCompletionChoicesDataInner, +) + + +class TestChatCompletionChoicesDataInner(unittest.TestCase): + """ChatCompletionChoicesDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoicesDataInner: + """Test ChatCompletionChoicesDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoicesDataInner` + """ + model = ChatCompletionChoicesDataInner() + if include_optional: + return ChatCompletionChoicesDataInner( + text = '', + index = 56, + seed = 56, + finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = None + ) + else: + return ChatCompletionChoicesDataInner( + ) + """ + + def testChatCompletionChoicesDataInner(self): + """Test ChatCompletionChoicesDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py 
b/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py new file mode 100644 index 00000000..88ae0977 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_choices_data_inner_logprobs.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_choices_data_inner_logprobs import ( + ChatCompletionChoicesDataInnerLogprobs, +) + + +class TestChatCompletionChoicesDataInnerLogprobs(unittest.TestCase): + """ChatCompletionChoicesDataInnerLogprobs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChoicesDataInnerLogprobs: + """Test ChatCompletionChoicesDataInnerLogprobs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChoicesDataInnerLogprobs` + """ + model = ChatCompletionChoicesDataInnerLogprobs() + if include_optional: + return ChatCompletionChoicesDataInnerLogprobs( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ] + ) + else: + return ChatCompletionChoicesDataInnerLogprobs( + ) + """ + + def testChatCompletionChoicesDataInnerLogprobs(self): + """Test ChatCompletionChoicesDataInnerLogprobs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk.py b/src/together/generated/test/test_chat_completion_chunk.py new file mode 100644 index 
00000000..f935abc8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_chunk.py @@ -0,0 +1,108 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_chunk import ChatCompletionChunk + + +class TestChatCompletionChunk(unittest.TestCase): + """ChatCompletionChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChunk: + """Test ChatCompletionChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChunk` + """ + model = ChatCompletionChunk() + if include_optional: + return ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = None + ) + else: + return 
ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + ) + """ + + def testChatCompletionChunk(self): + """Test ChatCompletionChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_chunk_choices_inner.py b/src/together/generated/test/test_chat_completion_chunk_choices_inner.py new file mode 100644 index 00000000..e7317378 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_chunk_choices_inner.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_chunk_choices_inner import ( + ChatCompletionChunkChoicesInner, +) + + +class TestChatCompletionChunkChoicesInner(unittest.TestCase): + """ChatCompletionChunkChoicesInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionChunkChoicesInner: + """Test ChatCompletionChunkChoicesInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionChunkChoicesInner` + """ + model = ChatCompletionChunkChoicesInner() + if include_optional: + return ChatCompletionChunkChoicesInner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ) + ) + else: + return ChatCompletionChunkChoicesInner( + index = 56, + finish_reason = 'stop', + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = 
together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), + ) + """ + + def testChatCompletionChunkChoicesInner(self): + """Test ChatCompletionChunkChoicesInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_event.py b/src/together/generated/test/test_chat_completion_event.py new file mode 100644 index 00000000..c219c114 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_event.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_event import ChatCompletionEvent + + +class TestChatCompletionEvent(unittest.TestCase): + """ChatCompletionEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionEvent: + """Test ChatCompletionEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionEvent` + """ + model = ChatCompletionEvent() + if include_optional: + return ChatCompletionEvent( + data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index 
= 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = null, ) + ) + else: + return ChatCompletionEvent( + data = together.generated.models.chat_completion_chunk.ChatCompletionChunk( + id = '', + object = 'chat.completion.chunk', + created = 56, + system_fingerprint = '', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + choices = [ + together.generated.models.chat_completion_chunk_choices_inner.ChatCompletionChunk_choices_inner( + index = 56, + finish_reason = 'stop', + logprobs = 1.337, + seed = 56, + delta = together.generated.models.chat_completion_choice_delta.ChatCompletionChoiceDelta( + token_id = 56, + role = 'system', + content = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_choice_delta_function_call.ChatCompletionChoiceDelta_function_call( + arguments = '', + name = '', ), ), ) + ], + usage = null, ), + ) + """ + + def testChatCompletionEvent(self): + """Test ChatCompletionEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_chat_completion_function_message_param.py b/src/together/generated/test/test_chat_completion_function_message_param.py new file mode 100644 index 00000000..07cfa130 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_function_message_param.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_function_message_param import ( + ChatCompletionFunctionMessageParam, +) + + +class TestChatCompletionFunctionMessageParam(unittest.TestCase): + """ChatCompletionFunctionMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionFunctionMessageParam: + """Test ChatCompletionFunctionMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionFunctionMessageParam` + """ + model = ChatCompletionFunctionMessageParam() + if include_optional: + return ChatCompletionFunctionMessageParam( + role = 'function', + content = '', + name = '' + ) + else: + return ChatCompletionFunctionMessageParam( + role = 'function', + content = '', + name = '', + ) + """ + + def testChatCompletionFunctionMessageParam(self): + """Test ChatCompletionFunctionMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message.py b/src/together/generated/test/test_chat_completion_message.py new 
file mode 100644 index 00000000..6e60a844 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_message.py @@ -0,0 +1,68 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message import ChatCompletionMessage + + +class TestChatCompletionMessage(unittest.TestCase): + """ChatCompletionMessage unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessage: + """Test ChatCompletionMessage + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessage` + """ + model = ChatCompletionMessage() + if include_optional: + return ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ) + ) + else: + return ChatCompletionMessage( + content = '', + role = 'assistant', + ) + """ + + def testChatCompletionMessage(self): + """Test ChatCompletionMessage""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_function_call.py 
b/src/together/generated/test/test_chat_completion_message_function_call.py new file mode 100644 index 00000000..90f0dbc7 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_message_function_call.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message_function_call import ( + ChatCompletionMessageFunctionCall, +) + + +class TestChatCompletionMessageFunctionCall(unittest.TestCase): + """ChatCompletionMessageFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessageFunctionCall: + """Test ChatCompletionMessageFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessageFunctionCall` + """ + model = ChatCompletionMessageFunctionCall() + if include_optional: + return ChatCompletionMessageFunctionCall( + arguments = '', + name = '' + ) + else: + return ChatCompletionMessageFunctionCall( + arguments = '', + name = '', + ) + """ + + def testChatCompletionMessageFunctionCall(self): + """Test ChatCompletionMessageFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_message_param.py b/src/together/generated/test/test_chat_completion_message_param.py new file mode 100644 index 00000000..d463e95d --- /dev/null +++ 
b/src/together/generated/test/test_chat_completion_message_param.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_message_param import ( + ChatCompletionMessageParam, +) + + +class TestChatCompletionMessageParam(unittest.TestCase): + """ChatCompletionMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionMessageParam: + """Test ChatCompletionMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionMessageParam` + """ + model = ChatCompletionMessageParam() + if include_optional: + return ChatCompletionMessageParam( + content = '', + role = 'function', + name = '', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), + tool_call_id = '' + ) + else: + return ChatCompletionMessageParam( + content = '', + role = 'function', + name = '', + tool_call_id = '', + ) + """ + + def testChatCompletionMessageParam(self): + """Test ChatCompletionMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_chat_completion_request.py b/src/together/generated/test/test_chat_completion_request.py new file mode 100644 index 00000000..d84e32d6 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request.py @@ -0,0 +1,98 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request import ChatCompletionRequest + + +class TestChatCompletionRequest(unittest.TestCase): + """ChatCompletionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequest: + """Test ChatCompletionRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequest` + """ + model = ChatCompletionRequest() + if include_optional: + return ChatCompletionRequest( + messages = [ + together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( + role = 'system', + content = '', ) + ], + model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, + max_tokens = 56, + stop = [ + '' + ], + temperature = 1.337, + top_p = 1.337, + top_k = 56, + context_length_exceeded_behavior = 'error', + repetition_penalty = 1.337, + stream = True, + logprobs = 0, + echo = True, + n = 1, + min_p = 1.337, + presence_penalty = 1.337, + frequency_penalty = 1.337, + logit_bias = {1024=-10.5, 105=21.4}, + seed = 42, + function_call = None, + response_format = together.generated.models.chat_completion_request_response_format.ChatCompletionRequest_response_format( + type = 'json', + schema = { + 'key' : '' + 
}, ), + tools = [ + together.generated.models.tools_part.ToolsPart( + type = 'tool_type', + function = together.generated.models.tools_part_function.ToolsPart_function( + description = 'A description of the function.', + name = 'function_name', + parameters = { }, ), ) + ], + tool_choice = None, + safety_model = 'safety_model_name' + ) + else: + return ChatCompletionRequest( + messages = [ + together.generated.models.chat_completion_request_messages_inner.ChatCompletionRequest_messages_inner( + role = 'system', + content = '', ) + ], + model = meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo, + ) + """ + + def testChatCompletionRequest(self): + """Test ChatCompletionRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call.py b/src/together/generated/test/test_chat_completion_request_function_call.py new file mode 100644 index 00000000..55125eb8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_function_call.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_function_call import ( + ChatCompletionRequestFunctionCall, +) + + +class TestChatCompletionRequestFunctionCall(unittest.TestCase): + """ChatCompletionRequestFunctionCall unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCall: + """Test ChatCompletionRequestFunctionCall + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestFunctionCall` + """ + model = ChatCompletionRequestFunctionCall() + if include_optional: + return ChatCompletionRequestFunctionCall( + name = '' + ) + else: + return ChatCompletionRequestFunctionCall( + name = '', + ) + """ + + def testChatCompletionRequestFunctionCall(self): + """Test ChatCompletionRequestFunctionCall""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_function_call_one_of.py b/src/together/generated/test/test_chat_completion_request_function_call_one_of.py new file mode 100644 index 00000000..58ec5841 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_function_call_one_of.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_function_call_one_of import ( + ChatCompletionRequestFunctionCallOneOf, +) + + +class TestChatCompletionRequestFunctionCallOneOf(unittest.TestCase): + """ChatCompletionRequestFunctionCallOneOf unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestFunctionCallOneOf: + """Test ChatCompletionRequestFunctionCallOneOf + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestFunctionCallOneOf` + """ + model = ChatCompletionRequestFunctionCallOneOf() + if include_optional: + return ChatCompletionRequestFunctionCallOneOf( + name = '' + ) + else: + return ChatCompletionRequestFunctionCallOneOf( + name = '', + ) + """ + + def testChatCompletionRequestFunctionCallOneOf(self): + """Test ChatCompletionRequestFunctionCallOneOf""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_messages_inner.py b/src/together/generated/test/test_chat_completion_request_messages_inner.py new file mode 100644 index 00000000..4d799742 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_messages_inner.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_messages_inner import ( + ChatCompletionRequestMessagesInner, +) + + +class TestChatCompletionRequestMessagesInner(unittest.TestCase): + """ChatCompletionRequestMessagesInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestMessagesInner: + """Test ChatCompletionRequestMessagesInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestMessagesInner` + """ + model = ChatCompletionRequestMessagesInner() + if include_optional: + return ChatCompletionRequestMessagesInner( + role = 'system', + content = '' + ) + else: + return ChatCompletionRequestMessagesInner( + role = 'system', + content = '', + ) + """ + + def testChatCompletionRequestMessagesInner(self): + """Test ChatCompletionRequestMessagesInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_model.py b/src/together/generated/test/test_chat_completion_request_model.py new file mode 100644 index 00000000..1f18e0f7 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_model import ( + ChatCompletionRequestModel, +) + + +class TestChatCompletionRequestModel(unittest.TestCase): + """ChatCompletionRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestModel: + """Test ChatCompletionRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestModel` + """ + model = ChatCompletionRequestModel() + if include_optional: + return ChatCompletionRequestModel( + ) + else: + return ChatCompletionRequestModel( + ) + """ + + def testChatCompletionRequestModel(self): + """Test ChatCompletionRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_response_format.py b/src/together/generated/test/test_chat_completion_request_response_format.py new file mode 100644 index 00000000..e6f5241b --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_response_format.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_response_format import ( + ChatCompletionRequestResponseFormat, +) + + +class TestChatCompletionRequestResponseFormat(unittest.TestCase): + """ChatCompletionRequestResponseFormat unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestResponseFormat: + """Test ChatCompletionRequestResponseFormat + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestResponseFormat` + """ + model = ChatCompletionRequestResponseFormat() + if include_optional: + return ChatCompletionRequestResponseFormat( + type = 'json', + var_schema = { + 'key' : '' + } + ) + else: + return ChatCompletionRequestResponseFormat( + ) + """ + + def testChatCompletionRequestResponseFormat(self): + """Test ChatCompletionRequestResponseFormat""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_request_tool_choice.py b/src/together/generated/test/test_chat_completion_request_tool_choice.py new file mode 100644 index 00000000..b04e7456 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_request_tool_choice.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_request_tool_choice import ( + ChatCompletionRequestToolChoice, +) + + +class TestChatCompletionRequestToolChoice(unittest.TestCase): + """ChatCompletionRequestToolChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionRequestToolChoice: + """Test ChatCompletionRequestToolChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionRequestToolChoice` + """ + model = ChatCompletionRequestToolChoice() + if include_optional: + return ChatCompletionRequestToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ) + ) + else: + return ChatCompletionRequestToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), + ) + """ + + def testChatCompletionRequestToolChoice(self): + """Test ChatCompletionRequestToolChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_response.py b/src/together/generated/test/test_chat_completion_response.py new file mode 100644 index 00000000..e02f8cb3 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_response.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_response import ChatCompletionResponse + + +class TestChatCompletionResponse(unittest.TestCase): + """ChatCompletionResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionResponse: + """Test ChatCompletionResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionResponse` + """ + model = ChatCompletionResponse() + if include_optional: + return ChatCompletionResponse( + id = '', + choices = [ + together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( + text = '', + index = 56, + seed = 56, + finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = null, ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'chat.completion' + ) + else: + return ChatCompletionResponse( + id = '', + choices = [ + together.generated.models.chat_completion_choices_data_inner.ChatCompletionChoicesData_inner( + text = '', + index = 56, + seed = 56, + 
finish_reason = 'stop', + message = together.generated.models.chat_completion_message.ChatCompletionMessage( + content = '', + role = 'assistant', + tool_calls = [ + together.generated.models.tool_choice.ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), ) + ], + function_call = together.generated.models.chat_completion_message_function_call.ChatCompletionMessage_function_call( + arguments = '', + name = '', ), ), + logprobs = null, ) + ], + created = 56, + model = '', + object = 'chat.completion', + ) + """ + + def testChatCompletionResponse(self): + """Test ChatCompletionResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_stream.py b/src/together/generated/test/test_chat_completion_stream.py new file mode 100644 index 00000000..29643caa --- /dev/null +++ b/src/together/generated/test/test_chat_completion_stream.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_stream import ChatCompletionStream + + +class TestChatCompletionStream(unittest.TestCase): + """ChatCompletionStream unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionStream: + """Test ChatCompletionStream + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionStream` + """ + model = ChatCompletionStream() + if include_optional: + return ChatCompletionStream( + data = '[DONE]' + ) + else: + return ChatCompletionStream( + data = '[DONE]', + ) + """ + + def testChatCompletionStream(self): + """Test ChatCompletionStream""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_system_message_param.py b/src/together/generated/test/test_chat_completion_system_message_param.py new file mode 100644 index 00000000..33b8a0c8 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_system_message_param.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_system_message_param import ( + ChatCompletionSystemMessageParam, +) + + +class TestChatCompletionSystemMessageParam(unittest.TestCase): + """ChatCompletionSystemMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionSystemMessageParam: + """Test ChatCompletionSystemMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionSystemMessageParam` + """ + model = ChatCompletionSystemMessageParam() + if include_optional: + return ChatCompletionSystemMessageParam( + content = '', + role = 'system', + name = '' + ) + else: + return ChatCompletionSystemMessageParam( + content = '', + role = 'system', + ) + """ + + def testChatCompletionSystemMessageParam(self): + """Test ChatCompletionSystemMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_token.py b/src/together/generated/test/test_chat_completion_token.py new file mode 100644 index 00000000..131f6c4d --- /dev/null +++ b/src/together/generated/test/test_chat_completion_token.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_token import ChatCompletionToken + + +class TestChatCompletionToken(unittest.TestCase): + """ChatCompletionToken unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToken: + """Test ChatCompletionToken + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToken` + """ + model = ChatCompletionToken() + if include_optional: + return ChatCompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True + ) + else: + return ChatCompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, + ) + """ + + def testChatCompletionToken(self): + """Test ChatCompletionToken""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool.py b/src/together/generated/test/test_chat_completion_tool.py new file mode 100644 index 00000000..2f795a90 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool import ChatCompletionTool + + +class TestChatCompletionTool(unittest.TestCase): + """ChatCompletionTool unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionTool: + """Test ChatCompletionTool + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionTool` + """ + model = ChatCompletionTool() + if include_optional: + return ChatCompletionTool( + type = 'function', + function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( + description = '', + name = '', + parameters = { + 'key' : null + }, ) + ) + else: + return ChatCompletionTool( + type = 'function', + function = together.generated.models.chat_completion_tool_function.ChatCompletionTool_function( + description = '', + name = '', + parameters = { + 'key' : null + }, ), + ) + """ + + def testChatCompletionTool(self): + """Test ChatCompletionTool""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_function.py b/src/together/generated/test/test_chat_completion_tool_function.py new file mode 100644 index 00000000..0d370610 --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool_function.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool_function import ( + ChatCompletionToolFunction, +) + + +class TestChatCompletionToolFunction(unittest.TestCase): + """ChatCompletionToolFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToolFunction: + """Test ChatCompletionToolFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToolFunction` + """ + model = ChatCompletionToolFunction() + if include_optional: + return ChatCompletionToolFunction( + description = '', + name = '', + parameters = { + 'key' : null + } + ) + else: + return ChatCompletionToolFunction( + name = '', + ) + """ + + def testChatCompletionToolFunction(self): + """Test ChatCompletionToolFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_tool_message_param.py b/src/together/generated/test/test_chat_completion_tool_message_param.py new file mode 100644 index 00000000..90aece7d --- /dev/null +++ b/src/together/generated/test/test_chat_completion_tool_message_param.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_tool_message_param import ( + ChatCompletionToolMessageParam, +) + + +class TestChatCompletionToolMessageParam(unittest.TestCase): + """ChatCompletionToolMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionToolMessageParam: + """Test ChatCompletionToolMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionToolMessageParam` + """ + model = ChatCompletionToolMessageParam() + if include_optional: + return ChatCompletionToolMessageParam( + role = 'tool', + content = '', + tool_call_id = '' + ) + else: + return ChatCompletionToolMessageParam( + role = 'tool', + content = '', + tool_call_id = '', + ) + """ + + def testChatCompletionToolMessageParam(self): + """Test ChatCompletionToolMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_chat_completion_user_message_param.py b/src/together/generated/test/test_chat_completion_user_message_param.py new file mode 100644 index 00000000..7571b70e --- /dev/null +++ b/src/together/generated/test/test_chat_completion_user_message_param.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.chat_completion_user_message_param import ( + ChatCompletionUserMessageParam, +) + + +class TestChatCompletionUserMessageParam(unittest.TestCase): + """ChatCompletionUserMessageParam unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ChatCompletionUserMessageParam: + """Test ChatCompletionUserMessageParam + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ChatCompletionUserMessageParam` + """ + model = ChatCompletionUserMessageParam() + if include_optional: + return ChatCompletionUserMessageParam( + content = '', + role = 'user', + name = '' + ) + else: + return ChatCompletionUserMessageParam( + content = '', + role = 'user', + ) + """ + + def testChatCompletionUserMessageParam(self): + """Test ChatCompletionUserMessageParam""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_api.py b/src/together/generated/test/test_completion_api.py new file mode 100644 index 00000000..7a0eaeae --- /dev/null +++ b/src/together/generated/test/test_completion_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.completion_api import CompletionApi + + +class TestCompletionApi(unittest.IsolatedAsyncioTestCase): + """CompletionApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = CompletionApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_completions(self) -> None: + """Test case for completions + + Create completion + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_choice.py b/src/together/generated/test/test_completion_choice.py new file mode 100644 index 00000000..a3ce4b5c --- /dev/null +++ b/src/together/generated/test/test_completion_choice.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_choice import CompletionChoice + + +class TestCompletionChoice(unittest.TestCase): + """CompletionChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChoice: + """Test CompletionChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChoice` + """ + model = CompletionChoice() + if include_optional: + return CompletionChoice( + text = '' + ) + else: + return CompletionChoice( + ) + """ + + def testCompletionChoice(self): + """Test CompletionChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_choices_data_inner.py b/src/together/generated/test/test_completion_choices_data_inner.py new file mode 100644 index 00000000..d71d81c7 --- /dev/null +++ b/src/together/generated/test/test_completion_choices_data_inner.py @@ -0,0 +1,67 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_choices_data_inner import ( + CompletionChoicesDataInner, +) + + +class TestCompletionChoicesDataInner(unittest.TestCase): + """CompletionChoicesDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChoicesDataInner: + """Test CompletionChoicesDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChoicesDataInner` + """ + model = CompletionChoicesDataInner() + if include_optional: + return CompletionChoicesDataInner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ) + ) + else: + return CompletionChoicesDataInner( + ) + """ + + def testCompletionChoicesDataInner(self): + """Test CompletionChoicesDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_chunk.py b/src/together/generated/test/test_completion_chunk.py new file mode 100644 index 00000000..448b44ff --- /dev/null +++ b/src/together/generated/test/test_completion_chunk.py @@ -0,0 +1,77 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_chunk import CompletionChunk + + +class TestCompletionChunk(unittest.TestCase): + """CompletionChunk unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChunk: + """Test CompletionChunk + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChunk` + """ + model = CompletionChunk() + if include_optional: + return CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = None, + seed = 56, + finish_reason = None + ) + else: + return CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = None, + finish_reason = None, + ) + """ + + def testCompletionChunk(self): + """Test CompletionChunk""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_chunk_usage.py b/src/together/generated/test/test_completion_chunk_usage.py new file mode 100644 index 00000000..09f1a850 --- /dev/null +++ b/src/together/generated/test/test_completion_chunk_usage.py @@ -0,0 +1,58 @@ +# 
coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_chunk_usage import CompletionChunkUsage + + +class TestCompletionChunkUsage(unittest.TestCase): + """CompletionChunkUsage unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionChunkUsage: + """Test CompletionChunkUsage + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionChunkUsage` + """ + model = CompletionChunkUsage() + if include_optional: + return CompletionChunkUsage( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56 + ) + else: + return CompletionChunkUsage( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, + ) + """ + + def testCompletionChunkUsage(self): + """Test CompletionChunkUsage""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_event.py b/src/together/generated/test/test_completion_event.py new file mode 100644 index 00000000..26181ffc --- /dev/null +++ b/src/together/generated/test/test_completion_event.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_event import CompletionEvent + + +class TestCompletionEvent(unittest.TestCase): + """CompletionEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionEvent: + """Test CompletionEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionEvent` + """ + model = CompletionEvent() + if include_optional: + return CompletionEvent( + data = together.generated.models.completion_chunk.CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = null, + seed = 56, + finish_reason = null, ) + ) + else: + return CompletionEvent( + data = together.generated.models.completion_chunk.CompletionChunk( + id = '', + token = together.generated.models.completion_token.CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, ), + choices = [ + together.generated.models.completion_choice.CompletionChoice( + text = '', ) + ], + usage = null, + seed = 56, + finish_reason = null, ), + ) + """ + + def testCompletionEvent(self): + """Test CompletionEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request.py b/src/together/generated/test/test_completion_request.py new file mode 100644 index 00000000..3a823073 --- /dev/null +++ b/src/together/generated/test/test_completion_request.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST 
API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request import CompletionRequest + + +class TestCompletionRequest(unittest.TestCase): + """CompletionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequest: + """Test CompletionRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequest` + """ + model = CompletionRequest() + if include_optional: + return CompletionRequest( + prompt = '[INST] What is the capital of France? [/INST]', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + max_tokens = 56, + stop = [ + '' + ], + temperature = 1.337, + top_p = 1.337, + top_k = 56, + repetition_penalty = 1.337, + stream = True, + logprobs = 0, + echo = True, + n = 1, + safety_model = 'safety_model_name', + min_p = 1.337, + presence_penalty = 1.337, + frequency_penalty = 1.337, + logit_bias = {1024=-10.5, 105=21.4}, + seed = 42 + ) + else: + return CompletionRequest( + prompt = '[INST] What is the capital of France? 
[/INST]', + model = 'mistralai/Mixtral-8x7B-Instruct-v0.1', + ) + """ + + def testCompletionRequest(self): + """Test CompletionRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request_model.py b/src/together/generated/test/test_completion_request_model.py new file mode 100644 index 00000000..d3cfa734 --- /dev/null +++ b/src/together/generated/test/test_completion_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request_model import CompletionRequestModel + + +class TestCompletionRequestModel(unittest.TestCase): + """CompletionRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequestModel: + """Test CompletionRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequestModel` + """ + model = CompletionRequestModel() + if include_optional: + return CompletionRequestModel( + ) + else: + return CompletionRequestModel( + ) + """ + + def testCompletionRequestModel(self): + """Test CompletionRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_request_safety_model.py 
b/src/together/generated/test/test_completion_request_safety_model.py new file mode 100644 index 00000000..fb7228a9 --- /dev/null +++ b/src/together/generated/test/test_completion_request_safety_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_request_safety_model import ( + CompletionRequestSafetyModel, +) + + +class TestCompletionRequestSafetyModel(unittest.TestCase): + """CompletionRequestSafetyModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionRequestSafetyModel: + """Test CompletionRequestSafetyModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionRequestSafetyModel` + """ + model = CompletionRequestSafetyModel() + if include_optional: + return CompletionRequestSafetyModel( + ) + else: + return CompletionRequestSafetyModel( + ) + """ + + def testCompletionRequestSafetyModel(self): + """Test CompletionRequestSafetyModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_response.py b/src/together/generated/test/test_completion_response.py new file mode 100644 index 00000000..f4003cf5 --- /dev/null +++ b/src/together/generated/test/test_completion_response.py @@ -0,0 +1,114 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_response import CompletionResponse + + +class TestCompletionResponse(unittest.TestCase): + """CompletionResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionResponse: + """Test CompletionResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionResponse` + """ + model = CompletionResponse() + if include_optional: + return CompletionResponse( + id = '', + choices = [ + together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + prompt = [ + together.generated.models.prompt_part_inner.PromptPart_inner( + text = '[INST] What is the capital of France? 
[/INST]', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'text_completion' + ) + else: + return CompletionResponse( + id = '', + choices = [ + together.generated.models.completion_choices_data_inner.CompletionChoicesData_inner( + text = 'The capital of France is Paris. It's located in the north-central part of the country and is one of the most populous and visited cities in the world, known for its iconic landmarks like the Eiffel Tower, Louvre Museum, Notre-Dame Cathedral, and more. Paris is also the capital of the Île-de-France region and is a major global center for art, fashion, gastronomy, and culture.', + seed = 56, + finish_reason = 'stop', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ), ) + ], + usage = together.generated.models.usage_data.UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, ), + created = 56, + model = '', + object = 'text_completion', + ) + """ + + def testCompletionResponse(self): + """Test CompletionResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_stream.py b/src/together/generated/test/test_completion_stream.py new file mode 100644 index 00000000..9edbd934 --- /dev/null +++ b/src/together/generated/test/test_completion_stream.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_stream import CompletionStream + + +class TestCompletionStream(unittest.TestCase): + """CompletionStream unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionStream: + """Test CompletionStream + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionStream` + """ + model = CompletionStream() + if include_optional: + return CompletionStream( + data = '[DONE]' + ) + else: + return CompletionStream( + data = '[DONE]', + ) + """ + + def testCompletionStream(self): + """Test CompletionStream""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_completion_token.py b/src/together/generated/test/test_completion_token.py new file mode 100644 index 00000000..a15263c6 --- /dev/null +++ b/src/together/generated/test/test_completion_token.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.completion_token import CompletionToken + + +class TestCompletionToken(unittest.TestCase): + """CompletionToken unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CompletionToken: + """Test CompletionToken + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CompletionToken` + """ + model = CompletionToken() + if include_optional: + return CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True + ) + else: + return CompletionToken( + id = 56, + text = '', + logprob = 1.337, + special = True, + ) + """ + + def testCompletionToken(self): + """Test CompletionToken""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_create_endpoint_request.py b/src/together/generated/test/test_create_endpoint_request.py new file mode 100644 index 00000000..b5e35dd0 --- /dev/null +++ b/src/together/generated/test/test_create_endpoint_request.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.create_endpoint_request import CreateEndpointRequest + + +class TestCreateEndpointRequest(unittest.TestCase): + """CreateEndpointRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CreateEndpointRequest: + """Test CreateEndpointRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `CreateEndpointRequest` + """ + model = CreateEndpointRequest() + if include_optional: + return CreateEndpointRequest( + display_name = '', + model = '', + hardware = '', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + disable_prompt_cache = True, + disable_speculative_decoding = True, + state = 'STARTED' + ) + else: + return CreateEndpointRequest( + model = '', + hardware = '', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + ) + """ + + def testCreateEndpointRequest(self): + """Test CreateEndpointRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_dedicated_endpoint.py b/src/together/generated/test/test_dedicated_endpoint.py new file mode 100644 index 00000000..61edfee9 --- /dev/null +++ b/src/together/generated/test/test_dedicated_endpoint.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.dedicated_endpoint import DedicatedEndpoint + + +class TestDedicatedEndpoint(unittest.TestCase): + """DedicatedEndpoint unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> DedicatedEndpoint: + """Test DedicatedEndpoint + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `DedicatedEndpoint` + """ + model = DedicatedEndpoint() + if include_optional: + return DedicatedEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', + display_name = 'My Llama3 70b endpoint', + model = 'meta-llama/Llama-3-8b-chat-hf', + hardware = '1x_nvidia_a100_80gb_sxm', + type = 'dedicated', + owner = 'devuser', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + created_at = '2025-02-04T10:43:55.405Z' + ) + else: + return DedicatedEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'devuser/meta-llama/Llama-3-8b-chat-hf-a32b82a1', + display_name = 'My Llama3 70b endpoint', + model = 'meta-llama/Llama-3-8b-chat-hf', + hardware = '1x_nvidia_a100_80gb_sxm', + type = 'dedicated', + owner = 'devuser', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ), + created_at = '2025-02-04T10:43:55.405Z', + ) + """ + + def testDedicatedEndpoint(self): + """Test DedicatedEndpoint""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_api.py 
b/src/together/generated/test/test_embeddings_api.py new file mode 100644 index 00000000..c63a7b1e --- /dev/null +++ b/src/together/generated/test/test_embeddings_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.embeddings_api import EmbeddingsApi + + +class TestEmbeddingsApi(unittest.IsolatedAsyncioTestCase): + """EmbeddingsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = EmbeddingsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_embeddings(self) -> None: + """Test case for embeddings + + Create embedding + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request.py b/src/together/generated/test/test_embeddings_request.py new file mode 100644 index 00000000..0652a2ff --- /dev/null +++ b/src/together/generated/test/test_embeddings_request.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request import EmbeddingsRequest + + +class TestEmbeddingsRequest(unittest.TestCase): + """EmbeddingsRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequest: + """Test EmbeddingsRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequest` + """ + model = EmbeddingsRequest() + if include_optional: + return EmbeddingsRequest( + model = 'togethercomputer/m2-bert-80M-8k-retrieval', + input = Our solar system orbits the Milky Way galaxy at about 515,000 mph + ) + else: + return EmbeddingsRequest( + model = 'togethercomputer/m2-bert-80M-8k-retrieval', + input = Our solar system orbits the Milky Way galaxy at about 515,000 mph, + ) + """ + + def testEmbeddingsRequest(self): + """Test EmbeddingsRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_input.py b/src/together/generated/test/test_embeddings_request_input.py new file mode 100644 index 00000000..bb51de9a --- /dev/null +++ b/src/together/generated/test/test_embeddings_request_input.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request_input import EmbeddingsRequestInput + + +class TestEmbeddingsRequestInput(unittest.TestCase): + """EmbeddingsRequestInput unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequestInput: + """Test EmbeddingsRequestInput + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequestInput` + """ + model = EmbeddingsRequestInput() + if include_optional: + return EmbeddingsRequestInput( + ) + else: + return EmbeddingsRequestInput( + ) + """ + + def testEmbeddingsRequestInput(self): + """Test EmbeddingsRequestInput""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_request_model.py b/src/together/generated/test/test_embeddings_request_model.py new file mode 100644 index 00000000..e31f5837 --- /dev/null +++ b/src/together/generated/test/test_embeddings_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_request_model import EmbeddingsRequestModel + + +class TestEmbeddingsRequestModel(unittest.TestCase): + """EmbeddingsRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsRequestModel: + """Test EmbeddingsRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsRequestModel` + """ + model = EmbeddingsRequestModel() + if include_optional: + return EmbeddingsRequestModel( + ) + else: + return EmbeddingsRequestModel( + ) + """ + + def testEmbeddingsRequestModel(self): + """Test EmbeddingsRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_response.py b/src/together/generated/test/test_embeddings_response.py new file mode 100644 index 00000000..0a09847c --- /dev/null +++ b/src/together/generated/test/test_embeddings_response.py @@ -0,0 +1,72 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_response import EmbeddingsResponse + + +class TestEmbeddingsResponse(unittest.TestCase): + """EmbeddingsResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsResponse: + """Test EmbeddingsResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsResponse` + """ + model = EmbeddingsResponse() + if include_optional: + return EmbeddingsResponse( + object = 'list', + model = '', + data = [ + together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, ) + ] + ) + else: + return EmbeddingsResponse( + object = 'list', + model = '', + data = [ + together.generated.models.embeddings_response_data_inner.EmbeddingsResponse_data_inner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, ) + ], + ) + """ + + def testEmbeddingsResponse(self): + """Test EmbeddingsResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_embeddings_response_data_inner.py b/src/together/generated/test/test_embeddings_response_data_inner.py new file mode 100644 index 00000000..88a95c8b --- /dev/null +++ b/src/together/generated/test/test_embeddings_response_data_inner.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.embeddings_response_data_inner import ( + EmbeddingsResponseDataInner, +) + + +class TestEmbeddingsResponseDataInner(unittest.TestCase): + """EmbeddingsResponseDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EmbeddingsResponseDataInner: + """Test EmbeddingsResponseDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EmbeddingsResponseDataInner` + """ + model = EmbeddingsResponseDataInner() + if include_optional: + return EmbeddingsResponseDataInner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56 + ) + else: + return EmbeddingsResponseDataInner( + object = 'embedding', + embedding = [ + 1.337 + ], + index = 56, + ) + """ + + def testEmbeddingsResponseDataInner(self): + """Test EmbeddingsResponseDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_endpoint_pricing.py b/src/together/generated/test/test_endpoint_pricing.py new file mode 100644 index 00000000..e99001c4 --- /dev/null +++ b/src/together/generated/test/test_endpoint_pricing.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.endpoint_pricing import EndpointPricing + + +class TestEndpointPricing(unittest.TestCase): + """EndpointPricing unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> EndpointPricing: + """Test EndpointPricing + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `EndpointPricing` + """ + model = EndpointPricing() + if include_optional: + return EndpointPricing( + cents_per_minute = 1.337 + ) + else: + return EndpointPricing( + cents_per_minute = 1.337, + ) + """ + + def testEndpointPricing(self): + """Test EndpointPricing""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_endpoints_api.py b/src/together/generated/test/test_endpoints_api.py new file mode 100644 index 00000000..9d384219 --- /dev/null +++ b/src/together/generated/test/test_endpoints_api.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.endpoints_api import EndpointsApi + + +class TestEndpointsApi(unittest.IsolatedAsyncioTestCase): + """EndpointsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = EndpointsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_create_endpoint(self) -> None: + """Test case for create_endpoint + + Create a dedicated endpoint, it will start automatically + """ + pass + + async def test_delete_endpoint(self) -> None: + """Test case for delete_endpoint + + Delete endpoint + """ + pass + + async def test_get_endpoint(self) -> None: + """Test case for get_endpoint + + Get endpoint by ID + """ + pass + + async def test_list_endpoints(self) -> None: + """Test case for list_endpoints + + List all endpoints, can be filtered by type + """ + pass + + async def test_update_endpoint(self) -> None: + """Test case for update_endpoint + + Update endpoint, this can also be used to start or stop a dedicated endpoint + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_error_data.py b/src/together/generated/test/test_error_data.py new file mode 100644 index 00000000..0f91ac2e --- /dev/null +++ b/src/together/generated/test/test_error_data.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.error_data import ErrorData + + +class TestErrorData(unittest.TestCase): + """ErrorData unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ErrorData: + """Test ErrorData + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ErrorData` + """ + model = ErrorData() + if include_optional: + return ErrorData( + error = together.generated.models.error_data_error.ErrorData_error( + message = '', + type = '', + param = '', + code = '', ) + ) + else: + return ErrorData( + error = together.generated.models.error_data_error.ErrorData_error( + message = '', + type = '', + param = '', + code = '', ), + ) + """ + + def testErrorData(self): + """Test ErrorData""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_error_data_error.py b/src/together/generated/test/test_error_data_error.py new file mode 100644 index 00000000..a6952f0a --- /dev/null +++ b/src/together/generated/test/test_error_data_error.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.error_data_error import ErrorDataError + + +class TestErrorDataError(unittest.TestCase): + """ErrorDataError unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ErrorDataError: + """Test ErrorDataError + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ErrorDataError` + """ + model = ErrorDataError() + if include_optional: + return ErrorDataError( + message = '', + type = '', + param = '', + code = '' + ) + else: + return ErrorDataError( + message = '', + type = '', + ) + """ + + def testErrorDataError(self): + """Test ErrorDataError""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_delete_response.py b/src/together/generated/test/test_file_delete_response.py new file mode 100644 index 00000000..5e0cf618 --- /dev/null +++ b/src/together/generated/test/test_file_delete_response.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_delete_response import FileDeleteResponse + + +class TestFileDeleteResponse(unittest.TestCase): + """FileDeleteResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileDeleteResponse: + """Test FileDeleteResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileDeleteResponse` + """ + model = FileDeleteResponse() + if include_optional: + return FileDeleteResponse( + id = '', + deleted = True + ) + else: + return FileDeleteResponse( + ) + """ + + def testFileDeleteResponse(self): + """Test FileDeleteResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_list.py b/src/together/generated/test/test_file_list.py new file mode 100644 index 00000000..e3984f30 --- /dev/null +++ b/src/together/generated/test/test_file_list.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_list import FileList + + +class TestFileList(unittest.TestCase): + """FileList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileList: + """Test FileList + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileList` + """ + model = FileList() + if include_optional: + return FileList( + data = [ + together.generated.models.file_response.FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, ) + ] + ) + else: + return FileList( + data = [ + together.generated.models.file_response.FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, ) + ], + ) + """ + + def testFileList(self): + """Test FileList""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_object.py b/src/together/generated/test/test_file_object.py new file mode 100644 index 00000000..a242cc02 --- /dev/null +++ b/src/together/generated/test/test_file_object.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_object import FileObject + + +class TestFileObject(unittest.TestCase): + """FileObject unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileObject: + """Test FileObject + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileObject` + """ + model = FileObject() + if include_optional: + return FileObject( + object = '', + id = '', + filename = '', + size = 56 + ) + else: + return FileObject( + ) + """ + + def testFileObject(self): + """Test FileObject""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_file_response.py b/src/together/generated/test/test_file_response.py new file mode 100644 index 00000000..06164546 --- /dev/null +++ b/src/together/generated/test/test_file_response.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.file_response import FileResponse + + +class TestFileResponse(unittest.TestCase): + """FileResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FileResponse: + """Test FileResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FileResponse` + """ + model = FileResponse() + if include_optional: + return FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56 + ) + else: + return FileResponse( + id = '', + object = 'file', + created_at = 1715021438, + filename = 'my_file.jsonl', + bytes = 2664, + purpose = 'fine-tune', + processed = True, + file_type = 'jsonl', + line_count = 56, + ) + """ + + def testFileResponse(self): + """Test FileResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_files_api.py b/src/together/generated/test/test_files_api.py new file mode 100644 index 00000000..0e5269ff --- /dev/null +++ b/src/together/generated/test/test_files_api.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.files_api import FilesApi + + +class TestFilesApi(unittest.IsolatedAsyncioTestCase): + """FilesApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = FilesApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_files_get(self) -> None: + """Test case for files_get + + List all files + """ + pass + + async def test_files_id_content_get(self) -> None: + """Test case for files_id_content_get + + Get file contents + """ + pass + + async def test_files_id_delete(self) -> None: + """Test case for files_id_delete + + Delete a file + """ + pass + + async def test_files_id_get(self) -> None: + """Test case for files_id_get + + List file + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tune_event.py b/src/together/generated/test/test_fine_tune_event.py new file mode 100644 index 00000000..f61a7b5f --- /dev/null +++ b/src/together/generated/test/test_fine_tune_event.py @@ -0,0 +1,79 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tune_event import FineTuneEvent + + +class TestFineTuneEvent(unittest.TestCase): + """FineTuneEvent unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTuneEvent: + """Test FineTuneEvent + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTuneEvent` + """ + model = FineTuneEvent() + if include_optional: + return FineTuneEvent( + object = 'fine-tune-event', + created_at = '', + level = ERROR_TO_EXAMPLE_VALUE, + message = '', + type = 'job_pending', + param_count = 56, + token_count = 56, + total_steps = 56, + wandb_url = '', + step = 56, + checkpoint_path = '', + model_path = '', + training_offset = 56, + hash = '' + ) + else: + return FineTuneEvent( + object = 'fine-tune-event', + created_at = '', + message = '', + type = 'job_pending', + param_count = 56, + token_count = 56, + total_steps = 56, + wandb_url = '', + step = 56, + checkpoint_path = '', + model_path = '', + training_offset = 56, + hash = '', + ) + """ + + def testFineTuneEvent(self): + """Test FineTuneEvent""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request.py b/src/together/generated/test/test_fine_tunes_post_request.py new file mode 100644 index 00000000..6e8a1471 --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request.py @@ -0,0 +1,76 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request import FineTunesPostRequest + + +class TestFineTunesPostRequest(unittest.TestCase): + """FineTunesPostRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequest: + """Test FineTunesPostRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequest` + """ + model = FineTunesPostRequest() + if include_optional: + return FineTunesPostRequest( + training_file = '', + validation_file = '', + model = '', + n_epochs = 56, + n_checkpoints = 56, + n_evals = 56, + batch_size = 56, + learning_rate = 1.337, + lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ), ), + warmup_ratio = 1.337, + max_grad_norm = 1.337, + weight_decay = 1.337, + suffix = '', + wandb_api_key = '', + wandb_base_url = '', + wandb_project_name = '', + wandb_name = '', + train_on_inputs = True, + training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type() + ) + else: + return FineTunesPostRequest( + training_file = '', + model = '', + ) + """ + + def testFineTunesPostRequest(self): + """Test FineTunesPostRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py 
b/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py new file mode 100644 index 00000000..06d1d703 --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request_train_on_inputs.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request_train_on_inputs import ( + FineTunesPostRequestTrainOnInputs, +) + + +class TestFineTunesPostRequestTrainOnInputs(unittest.TestCase): + """FineTunesPostRequestTrainOnInputs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequestTrainOnInputs: + """Test FineTunesPostRequestTrainOnInputs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequestTrainOnInputs` + """ + model = FineTunesPostRequestTrainOnInputs() + if include_optional: + return FineTunesPostRequestTrainOnInputs( + ) + else: + return FineTunesPostRequestTrainOnInputs( + ) + """ + + def testFineTunesPostRequestTrainOnInputs(self): + """Test FineTunesPostRequestTrainOnInputs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tunes_post_request_training_type.py b/src/together/generated/test/test_fine_tunes_post_request_training_type.py new file mode 100644 index 00000000..b92881ff --- /dev/null +++ b/src/together/generated/test/test_fine_tunes_post_request_training_type.py @@ -0,0 +1,62 @@ +# 
coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.fine_tunes_post_request_training_type import ( + FineTunesPostRequestTrainingType, +) + + +class TestFineTunesPostRequestTrainingType(unittest.TestCase): + """FineTunesPostRequestTrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FineTunesPostRequestTrainingType: + """Test FineTunesPostRequestTrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FineTunesPostRequestTrainingType` + """ + model = FineTunesPostRequestTrainingType() + if include_optional: + return FineTunesPostRequestTrainingType( + type = 'Full', + lora_r = 56, + lora_alpha = 56, + lora_dropout = 1.337, + lora_trainable_modules = 'all-linear' + ) + else: + return FineTunesPostRequestTrainingType( + type = 'Full', + lora_r = 56, + lora_alpha = 56, + ) + """ + + def testFineTunesPostRequestTrainingType(self): + """Test FineTunesPostRequestTrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_fine_tuning_api.py b/src/together/generated/test/test_fine_tuning_api.py new file mode 100644 index 00000000..dab43fa7 --- /dev/null +++ b/src/together/generated/test/test_fine_tuning_api.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.fine_tuning_api import FineTuningApi + + +class TestFineTuningApi(unittest.IsolatedAsyncioTestCase): + """FineTuningApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = FineTuningApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_fine_tunes_get(self) -> None: + """Test case for fine_tunes_get + + List all jobs + """ + pass + + async def test_fine_tunes_id_cancel_post(self) -> None: + """Test case for fine_tunes_id_cancel_post + + Cancel job + """ + pass + + async def test_fine_tunes_id_events_get(self) -> None: + """Test case for fine_tunes_id_events_get + + List job events + """ + pass + + async def test_fine_tunes_id_get(self) -> None: + """Test case for fine_tunes_id_get + + List job + """ + pass + + async def test_fine_tunes_post(self) -> None: + """Test case for fine_tunes_post + + Create job + """ + pass + + async def test_finetune_download_get(self) -> None: + """Test case for finetune_download_get + + Download model + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_download_result.py b/src/together/generated/test/test_finetune_download_result.py new file mode 100644 index 00000000..a0136246 --- /dev/null +++ b/src/together/generated/test/test_finetune_download_result.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_download_result import FinetuneDownloadResult + + +class TestFinetuneDownloadResult(unittest.TestCase): + """FinetuneDownloadResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneDownloadResult: + """Test FinetuneDownloadResult + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneDownloadResult` + """ + model = FinetuneDownloadResult() + if include_optional: + return FinetuneDownloadResult( + object = ERROR_TO_EXAMPLE_VALUE, + id = '', + checkpoint_step = 56, + filename = '', + size = 56 + ) + else: + return FinetuneDownloadResult( + ) + """ + + def testFinetuneDownloadResult(self): + """Test FinetuneDownloadResult""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_event_levels.py b/src/together/generated/test/test_finetune_event_levels.py new file mode 100644 index 00000000..c82e8354 --- /dev/null +++ b/src/together/generated/test/test_finetune_event_levels.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_event_levels import FinetuneEventLevels + + +class TestFinetuneEventLevels(unittest.TestCase): + """FinetuneEventLevels unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneEventLevels(self): + """Test FinetuneEventLevels""" + # inst = FinetuneEventLevels() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_event_type.py b/src/together/generated/test/test_finetune_event_type.py new file mode 100644 index 00000000..6340f74d --- /dev/null +++ b/src/together/generated/test/test_finetune_event_type.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_event_type import FinetuneEventType + + +class TestFinetuneEventType(unittest.TestCase): + """FinetuneEventType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneEventType(self): + """Test FinetuneEventType""" + # inst = FinetuneEventType() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_job_status.py b/src/together/generated/test/test_finetune_job_status.py new file mode 100644 index 00000000..2bbee5ee --- /dev/null +++ b/src/together/generated/test/test_finetune_job_status.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_job_status import FinetuneJobStatus + + +class TestFinetuneJobStatus(unittest.TestCase): + """FinetuneJobStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinetuneJobStatus(self): + """Test FinetuneJobStatus""" + # inst = FinetuneJobStatus() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_list.py b/src/together/generated/test/test_finetune_list.py new file mode 100644 index 00000000..40d16304 --- /dev/null +++ b/src/together/generated/test/test_finetune_list.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_list import FinetuneList + + +class TestFinetuneList(unittest.TestCase): + """FinetuneList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneList: + """Test FinetuneList + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneList` + """ + model = FinetuneList() + if include_optional: + return FinetuneList( + data = ERROR_TO_EXAMPLE_VALUE + ) + else: + return FinetuneList( + data = ERROR_TO_EXAMPLE_VALUE, + ) + """ + + def testFinetuneList(self): + """Test FinetuneList""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_list_events.py 
b/src/together/generated/test/test_finetune_list_events.py new file mode 100644 index 00000000..5170de82 --- /dev/null +++ b/src/together/generated/test/test_finetune_list_events.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_list_events import FinetuneListEvents + + +class TestFinetuneListEvents(unittest.TestCase): + """FinetuneListEvents unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneListEvents: + """Test FinetuneListEvents + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneListEvents` + """ + model = FinetuneListEvents() + if include_optional: + return FinetuneListEvents( + data = ERROR_TO_EXAMPLE_VALUE + ) + else: + return FinetuneListEvents( + data = ERROR_TO_EXAMPLE_VALUE, + ) + """ + + def testFinetuneListEvents(self): + """Test FinetuneListEvents""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_response.py b/src/together/generated/test/test_finetune_response.py new file mode 100644 index 00000000..288a4a9b --- /dev/null +++ b/src/together/generated/test/test_finetune_response.py @@ -0,0 +1,89 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. 
+ + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_response import FinetuneResponse + + +class TestFinetuneResponse(unittest.TestCase): + """FinetuneResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneResponse: + """Test FinetuneResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneResponse` + """ + model = FinetuneResponse() + if include_optional: + return FinetuneResponse( + id = '', + training_file = '', + validation_file = '', + model = '', + model_output_name = '', + model_output_path = '', + trainingfile_numlines = 56, + trainingfile_size = 56, + created_at = '', + updated_at = '', + n_epochs = 56, + n_checkpoints = 56, + n_evals = 56, + batch_size = 56, + learning_rate = 1.337, + lr_scheduler = together.generated.models.lr_scheduler.LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ), ), + warmup_ratio = 1.337, + max_grad_norm = 1.337, + weight_decay = 1.337, + eval_steps = 56, + train_on_inputs = None, + training_type = together.generated.models._fine_tunes_post_request_training_type._fine_tunes_post_request_training_type(), + status = 'pending', + job_id = '', + events = ERROR_TO_EXAMPLE_VALUE, + token_count = 56, + param_count = 56, + total_price = 56, + epochs_completed = 56, + queue_depth = 56, + wandb_project_name = '', + wandb_url = '' + ) + else: + return FinetuneResponse( + id = '', + status = 'pending', + ) + """ + + def testFinetuneResponse(self): + """Test FinetuneResponse""" + # inst_req_only = 
self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finetune_response_train_on_inputs.py b/src/together/generated/test/test_finetune_response_train_on_inputs.py new file mode 100644 index 00000000..f6133122 --- /dev/null +++ b/src/together/generated/test/test_finetune_response_train_on_inputs.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finetune_response_train_on_inputs import ( + FinetuneResponseTrainOnInputs, +) + + +class TestFinetuneResponseTrainOnInputs(unittest.TestCase): + """FinetuneResponseTrainOnInputs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FinetuneResponseTrainOnInputs: + """Test FinetuneResponseTrainOnInputs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FinetuneResponseTrainOnInputs` + """ + model = FinetuneResponseTrainOnInputs() + if include_optional: + return FinetuneResponseTrainOnInputs( + ) + else: + return FinetuneResponseTrainOnInputs( + ) + """ + + def testFinetuneResponseTrainOnInputs(self): + """Test FinetuneResponseTrainOnInputs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_finish_reason.py b/src/together/generated/test/test_finish_reason.py new file mode 100644 
index 00000000..02204fb6 --- /dev/null +++ b/src/together/generated/test/test_finish_reason.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.finish_reason import FinishReason + + +class TestFinishReason(unittest.TestCase): + """FinishReason unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testFinishReason(self): + """Test FinishReason""" + # inst = FinishReason() + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_full_training_type.py b/src/together/generated/test/test_full_training_type.py new file mode 100644 index 00000000..303187e3 --- /dev/null +++ b/src/together/generated/test/test_full_training_type.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.full_training_type import FullTrainingType + + +class TestFullTrainingType(unittest.TestCase): + """FullTrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> FullTrainingType: + """Test FullTrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `FullTrainingType` + """ + model = FullTrainingType() + if include_optional: + return FullTrainingType( + type = 'Full' + ) + else: + return FullTrainingType( + type = 'Full', + ) + """ + + def testFullTrainingType(self): + """Test FullTrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_api.py b/src/together/generated/test/test_hardware_api.py new file mode 100644 index 00000000..a347ff1c --- /dev/null +++ b/src/together/generated/test/test_hardware_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.hardware_api import HardwareApi + + +class TestHardwareApi(unittest.IsolatedAsyncioTestCase): + """HardwareApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = HardwareApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_list_hardware(self) -> None: + """Test case for list_hardware + + List available hardware configurations + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_availability.py b/src/together/generated/test/test_hardware_availability.py new file mode 100644 index 00000000..cf7d4016 --- /dev/null +++ b/src/together/generated/test/test_hardware_availability.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_availability import HardwareAvailability + + +class TestHardwareAvailability(unittest.TestCase): + """HardwareAvailability unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareAvailability: + """Test HardwareAvailability + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareAvailability` + """ + model = HardwareAvailability() + if include_optional: + return HardwareAvailability( + status = 'available' + ) + else: + return HardwareAvailability( + status = 'available', + ) + """ + + def testHardwareAvailability(self): + """Test HardwareAvailability""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_spec.py b/src/together/generated/test/test_hardware_spec.py new file mode 100644 index 00000000..f9888c33 --- /dev/null +++ b/src/together/generated/test/test_hardware_spec.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_spec import HardwareSpec + + +class TestHardwareSpec(unittest.TestCase): + """HardwareSpec unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareSpec: + """Test HardwareSpec + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareSpec` + """ + model = HardwareSpec() + if include_optional: + return HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56 + ) + else: + return HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, + ) + """ + + def testHardwareSpec(self): + """Test HardwareSpec""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_hardware_with_status.py b/src/together/generated/test/test_hardware_with_status.py new file mode 100644 index 00000000..a6ca05f6 --- /dev/null +++ b/src/together/generated/test/test_hardware_with_status.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.hardware_with_status import HardwareWithStatus + + +class TestHardwareWithStatus(unittest.TestCase): + """HardwareWithStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> HardwareWithStatus: + """Test HardwareWithStatus + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `HardwareWithStatus` + """ + model = HardwareWithStatus() + if include_optional: + return HardwareWithStatus( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return HardwareWithStatus( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testHardwareWithStatus(self): + """Test HardwareWithStatus""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_image_response.py b/src/together/generated/test/test_image_response.py new file mode 100644 index 00000000..fd124ab7 
--- /dev/null +++ b/src/together/generated/test/test_image_response.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.image_response import ImageResponse + + +class TestImageResponse(unittest.TestCase): + """ImageResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImageResponse: + """Test ImageResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImageResponse` + """ + model = ImageResponse() + if include_optional: + return ImageResponse( + id = '', + model = '', + object = 'list', + data = [ + together.generated.models.image_response_data_inner.ImageResponse_data_inner( + index = 56, + b64_json = '', + url = '', ) + ] + ) + else: + return ImageResponse( + id = '', + model = '', + object = 'list', + data = [ + together.generated.models.image_response_data_inner.ImageResponse_data_inner( + index = 56, + b64_json = '', + url = '', ) + ], + ) + """ + + def testImageResponse(self): + """Test ImageResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_image_response_data_inner.py b/src/together/generated/test/test_image_response_data_inner.py new file mode 100644 index 00000000..f12a697c --- /dev/null +++ b/src/together/generated/test/test_image_response_data_inner.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.image_response_data_inner import ImageResponseDataInner + + +class TestImageResponseDataInner(unittest.TestCase): + """ImageResponseDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImageResponseDataInner: + """Test ImageResponseDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImageResponseDataInner` + """ + model = ImageResponseDataInner() + if include_optional: + return ImageResponseDataInner( + index = 56, + b64_json = '', + url = '' + ) + else: + return ImageResponseDataInner( + index = 56, + ) + """ + + def testImageResponseDataInner(self): + """Test ImageResponseDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_api.py b/src/together/generated/test/test_images_api.py new file mode 100644 index 00000000..14888a0f --- /dev/null +++ b/src/together/generated/test/test_images_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.images_api import ImagesApi + + +class TestImagesApi(unittest.IsolatedAsyncioTestCase): + """ImagesApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ImagesApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_images_generations_post(self) -> None: + """Test case for images_generations_post + + Create image + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request.py b/src/together/generated/test/test_images_generations_post_request.py new file mode 100644 index 00000000..4376a118 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request import ( + ImagesGenerationsPostRequest, +) + + +class TestImagesGenerationsPostRequest(unittest.TestCase): + """ImagesGenerationsPostRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImagesGenerationsPostRequest: + """Test ImagesGenerationsPostRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequest` + """ + model = ImagesGenerationsPostRequest() + if include_optional: + return ImagesGenerationsPostRequest( + prompt = 'cat floating in space, cinematic', + model = 'black-forest-labs/FLUX.1-schnell', + steps = 56, + image_url = '', + seed = 56, + n = 56, + height = 56, + width = 56, + negative_prompt = '', + response_format = 'base64', + guidance = 1.337, + output_format = 'jpeg', + image_loras = [ + together.generated.models._images_generations_post_request_image_loras_inner._images_generations_post_request_image_loras_inner( + path = '', + scale = 1.337, ) + ] + ) + else: + return ImagesGenerationsPostRequest( + prompt = 'cat floating in space, cinematic', + model = 'black-forest-labs/FLUX.1-schnell', + ) + """ + + def testImagesGenerationsPostRequest(self): + """Test ImagesGenerationsPostRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py b/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py new file mode 100644 index 00000000..1fa6c7b0 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request_image_loras_inner.py 
@@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request_image_loras_inner import ( + ImagesGenerationsPostRequestImageLorasInner, +) + + +class TestImagesGenerationsPostRequestImageLorasInner(unittest.TestCase): + """ImagesGenerationsPostRequestImageLorasInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance( + self, include_optional + ) -> ImagesGenerationsPostRequestImageLorasInner: + """Test ImagesGenerationsPostRequestImageLorasInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequestImageLorasInner` + """ + model = ImagesGenerationsPostRequestImageLorasInner() + if include_optional: + return ImagesGenerationsPostRequestImageLorasInner( + path = '', + scale = 1.337 + ) + else: + return ImagesGenerationsPostRequestImageLorasInner( + path = '', + scale = 1.337, + ) + """ + + def testImagesGenerationsPostRequestImageLorasInner(self): + """Test ImagesGenerationsPostRequestImageLorasInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_images_generations_post_request_model.py b/src/together/generated/test/test_images_generations_post_request_model.py new file mode 100644 index 00000000..3cc3c613 --- /dev/null +++ b/src/together/generated/test/test_images_generations_post_request_model.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs 
+ + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.images_generations_post_request_model import ( + ImagesGenerationsPostRequestModel, +) + + +class TestImagesGenerationsPostRequestModel(unittest.TestCase): + """ImagesGenerationsPostRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ImagesGenerationsPostRequestModel: + """Test ImagesGenerationsPostRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ImagesGenerationsPostRequestModel` + """ + model = ImagesGenerationsPostRequestModel() + if include_optional: + return ImagesGenerationsPostRequestModel( + ) + else: + return ImagesGenerationsPostRequestModel( + ) + """ + + def testImagesGenerationsPostRequestModel(self): + """Test ImagesGenerationsPostRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_linear_lr_scheduler_args.py b/src/together/generated/test/test_linear_lr_scheduler_args.py new file mode 100644 index 00000000..a1181988 --- /dev/null +++ b/src/together/generated/test/test_linear_lr_scheduler_args.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.linear_lr_scheduler_args import LinearLRSchedulerArgs + + +class TestLinearLRSchedulerArgs(unittest.TestCase): + """LinearLRSchedulerArgs unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LinearLRSchedulerArgs: + """Test LinearLRSchedulerArgs + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LinearLRSchedulerArgs` + """ + model = LinearLRSchedulerArgs() + if include_optional: + return LinearLRSchedulerArgs( + min_lr_ratio = 1.337 + ) + else: + return LinearLRSchedulerArgs( + ) + """ + + def testLinearLRSchedulerArgs(self): + """Test LinearLRSchedulerArgs""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_endpoint.py b/src/together/generated/test/test_list_endpoint.py new file mode 100644 index 00000000..54ad619a --- /dev/null +++ b/src/together/generated/test/test_list_endpoint.py @@ -0,0 +1,68 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_endpoint import ListEndpoint + + +class TestListEndpoint(unittest.TestCase): + """ListEndpoint unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListEndpoint: + """Test ListEndpoint + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListEndpoint` + """ + model = ListEndpoint() + if include_optional: + return ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z' + ) + else: + return ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', + ) + """ + + def testListEndpoint(self): + """Test ListEndpoint""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_endpoints200_response.py b/src/together/generated/test/test_list_endpoints200_response.py new file mode 100644 index 00000000..246d4f99 --- /dev/null +++ b/src/together/generated/test/test_list_endpoints200_response.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_endpoints200_response import ( + ListEndpoints200Response, +) + + +class TestListEndpoints200Response(unittest.TestCase): + """ListEndpoints200Response unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListEndpoints200Response: + """Test ListEndpoints200Response + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListEndpoints200Response` + """ + model = ListEndpoints200Response() + if include_optional: + return ListEndpoints200Response( + object = 'list', + data = [ + together.generated.models.list_endpoint.ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', ) + ] + ) + else: + return ListEndpoints200Response( + object = 'list', + data = [ + together.generated.models.list_endpoint.ListEndpoint( + object = 'endpoint', + id = 'endpoint-d23901de-ef8f-44bf-b3e7-de9c1ca8f2d7', + name = 'allenai/OLMo-7B', + model = 'allenai/OLMo-7B', + type = 'serverless', + owner = 'together', + state = 'STARTED', + created_at = '2024-02-28T21:34:35.444Z', ) + ], + ) + """ + + def testListEndpoints200Response(self): + """Test ListEndpoints200Response""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response.py b/src/together/generated/test/test_list_hardware200_response.py new file mode 100644 index 00000000..bcc8dda5 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response.py 
@@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response import ListHardware200Response + + +class TestListHardware200Response(unittest.TestCase): + """ListHardware200Response unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200Response: + """Test ListHardware200Response + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200Response` + """ + model = ListHardware200Response() + if include_optional: + return ListHardware200Response( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200Response( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200Response(self): + """Test ListHardware200Response""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of.py b/src/together/generated/test/test_list_hardware200_response_one_of.py new file mode 100644 index 00000000..9b25ae66 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of import ( + ListHardware200ResponseOneOf, +) + + +class TestListHardware200ResponseOneOf(unittest.TestCase): + """ListHardware200ResponseOneOf unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf: + """Test ListHardware200ResponseOneOf + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf` + """ + model = ListHardware200ResponseOneOf() + if include_optional: + return ListHardware200ResponseOneOf( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200ResponseOneOf( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200ResponseOneOf(self): + """Test ListHardware200ResponseOneOf""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1.py b/src/together/generated/test/test_list_hardware200_response_one_of1.py new file mode 100644 index 00000000..2925384c --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of1.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of1 import ( + ListHardware200ResponseOneOf1, +) + + +class TestListHardware200ResponseOneOf1(unittest.TestCase): + """ListHardware200ResponseOneOf1 unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1: + """Test ListHardware200ResponseOneOf1 + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf1` + """ + model = ListHardware200ResponseOneOf1() + if include_optional: + return ListHardware200ResponseOneOf1( + object = 'list', + data = [ + null + ] + ) + else: + return ListHardware200ResponseOneOf1( + object = 'list', + data = [ + null + ], + ) + """ + + def testListHardware200ResponseOneOf1(self): + """Test ListHardware200ResponseOneOf1""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py new file mode 100644 index 00000000..a51e7dbe --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of1_data_inner.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of1_data_inner import ( + ListHardware200ResponseOneOf1DataInner, +) + + +class TestListHardware200ResponseOneOf1DataInner(unittest.TestCase): + """ListHardware200ResponseOneOf1DataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOf1DataInner: + """Test ListHardware200ResponseOneOf1DataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOf1DataInner` + """ + model = ListHardware200ResponseOneOf1DataInner() + if include_optional: + return ListHardware200ResponseOneOf1DataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return ListHardware200ResponseOneOf1DataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = together.generated.models.hardware_availability.HardwareAvailability( + status = 'available', ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testListHardware200ResponseOneOf1DataInner(self): + """Test 
ListHardware200ResponseOneOf1DataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py b/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py new file mode 100644 index 00000000..e6193a14 --- /dev/null +++ b/src/together/generated/test/test_list_hardware200_response_one_of_data_inner.py @@ -0,0 +1,75 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.list_hardware200_response_one_of_data_inner import ( + ListHardware200ResponseOneOfDataInner, +) + + +class TestListHardware200ResponseOneOfDataInner(unittest.TestCase): + """ListHardware200ResponseOneOfDataInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ListHardware200ResponseOneOfDataInner: + """Test ListHardware200ResponseOneOfDataInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ListHardware200ResponseOneOfDataInner` + """ + model = ListHardware200ResponseOneOfDataInner() + if include_optional: + return ListHardware200ResponseOneOfDataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + availability = None, + 
updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f') + ) + else: + return ListHardware200ResponseOneOfDataInner( + object = 'hardware', + name = '', + pricing = together.generated.models.endpoint_pricing.EndpointPricing( + cents_per_minute = 1.337, ), + specs = together.generated.models.hardware_spec.HardwareSpec( + gpu_type = '', + gpu_link = '', + gpu_memory = 1.337, + gpu_count = 56, ), + updated_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testListHardware200ResponseOneOfDataInner(self): + """Test ListHardware200ResponseOneOfDataInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_lo_ra_training_type.py b/src/together/generated/test/test_lo_ra_training_type.py new file mode 100644 index 00000000..dcd0309c --- /dev/null +++ b/src/together/generated/test/test_lo_ra_training_type.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.lo_ra_training_type import LoRATrainingType + + +class TestLoRATrainingType(unittest.TestCase): + """LoRATrainingType unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LoRATrainingType: + """Test LoRATrainingType + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LoRATrainingType` + """ + model = LoRATrainingType() + if include_optional: + return LoRATrainingType( + type = 'Lora', + lora_r = 56, + lora_alpha = 56, + lora_dropout = 1.337, + lora_trainable_modules = 'all-linear' + ) + else: + return LoRATrainingType( + type = 'Lora', + lora_r = 56, + lora_alpha = 56, + ) + """ + + def testLoRATrainingType(self): + """Test LoRATrainingType""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_logprobs_part.py b/src/together/generated/test/test_logprobs_part.py new file mode 100644 index 00000000..b37d38e9 --- /dev/null +++ b/src/together/generated/test/test_logprobs_part.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.logprobs_part import LogprobsPart + + +class TestLogprobsPart(unittest.TestCase): + """LogprobsPart unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LogprobsPart: + """Test LogprobsPart + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LogprobsPart` + """ + model = LogprobsPart() + if include_optional: + return LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ] + ) + else: + return LogprobsPart( + ) + """ + + def testLogprobsPart(self): + """Test LogprobsPart""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_lr_scheduler.py b/src/together/generated/test/test_lr_scheduler.py new file mode 100644 index 00000000..281f4102 --- /dev/null +++ b/src/together/generated/test/test_lr_scheduler.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.lr_scheduler import LRScheduler + + +class TestLRScheduler(unittest.TestCase): + """LRScheduler unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> LRScheduler: + """Test LRScheduler + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `LRScheduler` + """ + model = LRScheduler() + if include_optional: + return LRScheduler( + lr_scheduler_type = '', + lr_scheduler_args = together.generated.models.linear_lr_scheduler_args.LinearLRSchedulerArgs( + min_lr_ratio = 1.337, ) + ) + else: + return LRScheduler( + lr_scheduler_type = '', + ) + """ + + def testLRScheduler(self): + """Test LRScheduler""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_model_info.py b/src/together/generated/test/test_model_info.py new file mode 100644 index 00000000..24c4f5ca --- /dev/null +++ b/src/together/generated/test/test_model_info.py @@ -0,0 +1,71 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.model_info import ModelInfo + + +class TestModelInfo(unittest.TestCase): + """ModelInfo unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ModelInfo: + """Test ModelInfo + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ModelInfo` + """ + model = ModelInfo() + if include_optional: + return ModelInfo( + id = 'Austism/chronos-hermes-13b', + object = 'model', + created = 1692896905, + type = 'chat', + display_name = 'Chronos Hermes (13B)', + organization = 'Austism', + link = '', + license = 'other', + context_length = 2048, + pricing = together.generated.models.pricing.Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0, ) + ) + else: + return ModelInfo( + id = 'Austism/chronos-hermes-13b', + object = 'model', + created = 1692896905, + type = 'chat', + ) + """ + + def testModelInfo(self): + """Test ModelInfo""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_models_api.py b/src/together/generated/test/test_models_api.py new file mode 100644 index 00000000..0ba1e2b4 --- /dev/null +++ b/src/together/generated/test/test_models_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.api.models_api import ModelsApi + + +class TestModelsApi(unittest.IsolatedAsyncioTestCase): + """ModelsApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = ModelsApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_models(self) -> None: + """Test case for models + + List all models + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_pricing.py b/src/together/generated/test/test_pricing.py new file mode 100644 index 00000000..8cf572bd --- /dev/null +++ b/src/together/generated/test/test_pricing.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.pricing import Pricing + + +class TestPricing(unittest.TestCase): + """Pricing unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> Pricing: + """Test Pricing + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `Pricing` + """ + model = Pricing() + if include_optional: + return Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0 + ) + else: + return Pricing( + hourly = 0, + input = 0.3, + output = 0.3, + base = 0, + finetune = 0, + ) + """ + + def testPricing(self): + """Test Pricing""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git 
a/src/together/generated/test/test_prompt_part_inner.py b/src/together/generated/test/test_prompt_part_inner.py new file mode 100644 index 00000000..5d588326 --- /dev/null +++ b/src/together/generated/test/test_prompt_part_inner.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.prompt_part_inner import PromptPartInner + + +class TestPromptPartInner(unittest.TestCase): + """PromptPartInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> PromptPartInner: + """Test PromptPartInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `PromptPartInner` + """ + model = PromptPartInner() + if include_optional: + return PromptPartInner( + text = '[INST] What is the capital of France? [/INST]', + logprobs = together.generated.models.logprobs_part.LogprobsPart( + token_ids = [ + 1.337 + ], + tokens = [ + '' + ], + token_logprobs = [ + 1.337 + ], ) + ) + else: + return PromptPartInner( + ) + """ + + def testPromptPartInner(self): + """Test PromptPartInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_api.py b/src/together/generated/test/test_rerank_api.py new file mode 100644 index 00000000..2acd64cd --- /dev/null +++ b/src/together/generated/test/test_rerank_api.py @@ -0,0 +1,38 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.api.rerank_api import RerankApi + + +class TestRerankApi(unittest.IsolatedAsyncioTestCase): + """RerankApi unit test stubs""" + + async def asyncSetUp(self) -> None: + self.api = RerankApi() + + async def asyncTearDown(self) -> None: + await self.api.api_client.close() + + async def test_rerank(self) -> None: + """Test case for rerank + + Create a rerank request + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request.py b/src/together/generated/test/test_rerank_request.py new file mode 100644 index 00000000..c8489b9f --- /dev/null +++ b/src/together/generated/test/test_rerank_request.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request import RerankRequest + + +class TestRerankRequest(unittest.TestCase): + """RerankRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequest: + """Test RerankRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequest` + """ + model = RerankRequest() + if include_optional: + return RerankRequest( + model = 'Salesforce/Llama-Rank-V1', + query = 'What animals can I find near Peru?', + documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], + top_n = 2, + return_documents = True, + rank_fields = [title, text] + ) + else: + return RerankRequest( + model = 'Salesforce/Llama-Rank-V1', + query = 'What animals can I find near Peru?', + documents = [{title=Llama, text=The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era.}, {title=Panda, text=The giant panda (Ailuropoda melanoleuca), also known as the panda bear or simply panda, is a bear species endemic to China.}, {title=Guanaco, text=The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations.}, {title=Wild Bactrian camel, text=The wild Bactrian camel (Camelus ferus) is an endangered species of camel endemic to Northwest China and southwestern Mongolia.}], + ) + """ + + def testRerankRequest(self): + """Test RerankRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request_documents.py b/src/together/generated/test/test_rerank_request_documents.py new file mode 100644 index 00000000..53fe08af --- /dev/null +++ b/src/together/generated/test/test_rerank_request_documents.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request_documents import RerankRequestDocuments + + +class TestRerankRequestDocuments(unittest.TestCase): + """RerankRequestDocuments unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequestDocuments: + """Test RerankRequestDocuments + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequestDocuments` + """ + model = RerankRequestDocuments() + if include_optional: + return RerankRequestDocuments( + ) + else: + return RerankRequestDocuments( + ) + """ + + def testRerankRequestDocuments(self): + """Test RerankRequestDocuments""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_request_model.py b/src/together/generated/test/test_rerank_request_model.py new file mode 100644 index 00000000..285741b2 --- /dev/null +++ b/src/together/generated/test/test_rerank_request_model.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_request_model import RerankRequestModel + + +class TestRerankRequestModel(unittest.TestCase): + """RerankRequestModel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankRequestModel: + """Test RerankRequestModel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankRequestModel` + """ + model = RerankRequestModel() + if include_optional: + return RerankRequestModel( + ) + else: + return RerankRequestModel( + ) + """ + + def testRerankRequestModel(self): + """Test RerankRequestModel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response.py b/src/together/generated/test/test_rerank_response.py new file mode 100644 index 00000000..187a5ac1 --- /dev/null +++ b/src/together/generated/test/test_rerank_response.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response import RerankResponse + + +class TestRerankResponse(unittest.TestCase): + """RerankResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponse: + """Test RerankResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponse` + """ + model = RerankResponse() + if include_optional: + return RerankResponse( + object = 'rerank', + id = '9dfa1a09-5ebc-4a40-970f-586cb8f4ae47', + model = 'salesforce/turboranker-0.8-3778-6328', + results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], + usage = {prompt_tokens=1837, completion_tokens=0, total_tokens=1837} + ) + else: + return RerankResponse( + object = 'rerank', + model = 'salesforce/turboranker-0.8-3778-6328', + results = [{index=0, relevance_score=0.29980177813003117, document={text={"title":"Llama","text":"The llama is a domesticated South American camelid, widely used as a meat and pack animal by Andean cultures since the pre-Columbian era."}}}, {index=2, relevance_score=0.2752447527354349, document={text={"title":"Guanaco","text":"The guanaco is a camelid native to South America, closely related to the llama. 
Guanacos are one of two wild South American camelids; the other species is the vicuña, which lives at higher elevations."}}}], + ) + """ + + def testRerankResponse(self): + """Test RerankResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner.py b/src/together/generated/test/test_rerank_response_results_inner.py new file mode 100644 index 00000000..1ff263c5 --- /dev/null +++ b/src/together/generated/test/test_rerank_response_results_inner.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response_results_inner import ( + RerankResponseResultsInner, +) + + +class TestRerankResponseResultsInner(unittest.TestCase): + """RerankResponseResultsInner unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponseResultsInner: + """Test RerankResponseResultsInner + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponseResultsInner` + """ + model = RerankResponseResultsInner() + if include_optional: + return RerankResponseResultsInner( + index = 56, + relevance_score = 1.337, + document = together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( + text = '', ) + ) + else: + return RerankResponseResultsInner( + index = 56, + relevance_score = 1.337, + document = 
together.generated.models.rerank_response_results_inner_document.RerankResponse_results_inner_document( + text = '', ), + ) + """ + + def testRerankResponseResultsInner(self): + """Test RerankResponseResultsInner""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_rerank_response_results_inner_document.py b/src/together/generated/test/test_rerank_response_results_inner_document.py new file mode 100644 index 00000000..02fb87ce --- /dev/null +++ b/src/together/generated/test/test_rerank_response_results_inner_document.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.rerank_response_results_inner_document import ( + RerankResponseResultsInnerDocument, +) + + +class TestRerankResponseResultsInnerDocument(unittest.TestCase): + """RerankResponseResultsInnerDocument unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RerankResponseResultsInnerDocument: + """Test RerankResponseResultsInnerDocument + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RerankResponseResultsInnerDocument` + """ + model = RerankResponseResultsInnerDocument() + if include_optional: + return RerankResponseResultsInnerDocument( + text = '' + ) + else: + return RerankResponseResultsInnerDocument( + ) + """ + + def testRerankResponseResultsInnerDocument(self): + """Test RerankResponseResultsInnerDocument""" 
+ # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_stream_sentinel.py b/src/together/generated/test/test_stream_sentinel.py new file mode 100644 index 00000000..58961b3f --- /dev/null +++ b/src/together/generated/test/test_stream_sentinel.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.stream_sentinel import StreamSentinel + + +class TestStreamSentinel(unittest.TestCase): + """StreamSentinel unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> StreamSentinel: + """Test StreamSentinel + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `StreamSentinel` + """ + model = StreamSentinel() + if include_optional: + return StreamSentinel( + data = '[DONE]' + ) + else: + return StreamSentinel( + data = '[DONE]', + ) + """ + + def testStreamSentinel(self): + """Test StreamSentinel""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tool_choice.py b/src/together/generated/test/test_tool_choice.py new file mode 100644 index 00000000..b34a312a --- /dev/null +++ b/src/together/generated/test/test_tool_choice.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. 
Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from together.generated.models.tool_choice import ToolChoice + + +class TestToolChoice(unittest.TestCase): + """ToolChoice unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolChoice: + """Test ToolChoice + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolChoice` + """ + model = ToolChoice() + if include_optional: + return ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ) + ) + else: + return ToolChoice( + index = 1.337, + id = '', + type = 'function', + function = together.generated.models.tool_choice_function.ToolChoice_function( + name = 'function_name', + arguments = '', ), + ) + """ + + def testToolChoice(self): + """Test ToolChoice""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tool_choice_function.py b/src/together/generated/test/test_tool_choice_function.py new file mode 100644 index 00000000..d7a2a8fa --- /dev/null +++ b/src/together/generated/test/test_tool_choice_function.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tool_choice_function import ToolChoiceFunction + + +class TestToolChoiceFunction(unittest.TestCase): + """ToolChoiceFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolChoiceFunction: + """Test ToolChoiceFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolChoiceFunction` + """ + model = ToolChoiceFunction() + if include_optional: + return ToolChoiceFunction( + name = 'function_name', + arguments = '' + ) + else: + return ToolChoiceFunction( + name = 'function_name', + arguments = '', + ) + """ + + def testToolChoiceFunction(self): + """Test ToolChoiceFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tools_part.py b/src/together/generated/test/test_tools_part.py new file mode 100644 index 00000000..6f3aad82 --- /dev/null +++ b/src/together/generated/test/test_tools_part.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tools_part import ToolsPart + + +class TestToolsPart(unittest.TestCase): + """ToolsPart unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolsPart: + """Test ToolsPart + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolsPart` + """ + model = ToolsPart() + if include_optional: + return ToolsPart( + type = 'tool_type', + function = together.generated.models.tools_part_function.ToolsPart_function( + description = 'A description of the function.', + name = 'function_name', + parameters = { }, ) + ) + else: + return ToolsPart( + ) + """ + + def testToolsPart(self): + """Test ToolsPart""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_tools_part_function.py b/src/together/generated/test/test_tools_part_function.py new file mode 100644 index 00000000..35de3a5b --- /dev/null +++ b/src/together/generated/test/test_tools_part_function.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.tools_part_function import ToolsPartFunction + + +class TestToolsPartFunction(unittest.TestCase): + """ToolsPartFunction unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> ToolsPartFunction: + """Test ToolsPartFunction + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `ToolsPartFunction` + """ + model = ToolsPartFunction() + if include_optional: + return ToolsPartFunction( + description = 'A description of the function.', + name = 'function_name', + parameters = { } + ) + else: + return ToolsPartFunction( + ) + """ + + def testToolsPartFunction(self): + """Test ToolsPartFunction""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_update_endpoint_request.py b/src/together/generated/test/test_update_endpoint_request.py new file mode 100644 index 00000000..3ce2db6f --- /dev/null +++ b/src/together/generated/test/test_update_endpoint_request.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.update_endpoint_request import UpdateEndpointRequest + + +class TestUpdateEndpointRequest(unittest.TestCase): + """UpdateEndpointRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> UpdateEndpointRequest: + """Test UpdateEndpointRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `UpdateEndpointRequest` + """ + model = UpdateEndpointRequest() + if include_optional: + return UpdateEndpointRequest( + display_name = 'My Llama3 70b endpoint', + state = 'STARTED', + autoscaling = together.generated.models.autoscaling.Autoscaling( + min_replicas = 56, + max_replicas = 56, ) + ) + else: + return UpdateEndpointRequest( + ) + """ + + def testUpdateEndpointRequest(self): + """Test UpdateEndpointRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/together/generated/test/test_usage_data.py b/src/together/generated/test/test_usage_data.py new file mode 100644 index 00000000..e8de8bd8 --- /dev/null +++ b/src/together/generated/test/test_usage_data.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Together APIs + + The Together REST API. Please see https://docs.together.ai for more details. + + The version of the OpenAPI document: 2.0.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from together.generated.models.usage_data import UsageData + + +class TestUsageData(unittest.TestCase): + """UsageData unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> UsageData: + """Test UsageData + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `UsageData` + """ + model = UsageData() + if include_optional: + return UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56 + ) + else: + return UsageData( + prompt_tokens = 56, + completion_tokens = 56, + total_tokens = 56, + ) + """ + + def testUsageData(self): + """Test UsageData""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main()