diff --git a/sdk/python/jobs/finetuning/standalone/chat-completion/chat_completion_with_model_as_service.ipynb b/sdk/python/jobs/finetuning/standalone/chat-completion/chat_completion_with_model_as_service.ipynb
index d8f9bebf7ba..c5ae2dce4de 100644
--- a/sdk/python/jobs/finetuning/standalone/chat-completion/chat_completion_with_model_as_service.ipynb
+++ b/sdk/python/jobs/finetuning/standalone/chat-completion/chat_completion_with_model_as_service.ipynb
@@ -7,7 +7,7 @@
  "source": [
  "## FineTuning LLM with Model-As-Service\n",
  "\n",
- "This sample shows how use create a standalone FineTuning job to fine tune a model to summarize a dialog between 2 people using samsum dataset.\n",
+ "This sample shows how to create a standalone FineTuning job to fine-tune a model using the ultrachat dataset.\n",
  "\n",
  "#### Training data\n",
- "We use the [ultrachat_200k](https://huggingface.co/datasets/samsum) dataset. The dataset has four splits, suitable for:\n",
+ "We use the [ultrachat_200k](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k) dataset. The dataset has four splits, suitable for:\n",
@@ -15,7 +15,7 @@
  "* Generation ranking (gen).\n",
  "\n",
  "#### Model\n",
- "We will use the Phi-3-mini-4k-instruct model to show how user can finetune a model for chat-completion task. If you opened this notebook from a specific model card, remember to replace the specific model name. \n",
+ "We will use the Ministral-3B model to show how a user can fine-tune a model for the chat-completion task. If you opened this notebook from a specific model card, remember to replace the model name accordingly. \n",
  "\n",
  "#### Outline\n",
  "1. Setup pre-requisites\n",
@@ -96,7 +96,7 @@
  "\n",
  "# the models, fine tuning pipelines and environments are available in various AzureML system registries,\n",
- "# Example: Phi family of models are in \"azureml\", Llama family of models are in \"azureml-meta\" registry.\n",
- "registry_ml_client = MLClient(credential, registry_name=\"azureml-meta\")\n",
+ "# Example: Phi family of models are in \"azureml\", Llama family in \"azureml-meta\", and Mistral family in \"azureml-mistral\" registry.\n",
+ "registry_ml_client = MLClient(credential, registry_name=\"azureml-mistral\")\n",
  "\n",
  "# Get AzureML workspace object.\n",
  "workspace = workspace_ml_client._workspaces.get(workspace_ml_client.workspace_name)\n",
@@ -110,7 +110,7 @@
  "source": [
  "### 2. Pick a foundation model to fine tune\n",
  "\n",
- "`Phi-3-mini-4k-instruct` is a 3.8B parameters, lightweight, state-of-the-art open model built upon datasets used for Phi-2. The model belongs to the Phi-3 model family, and the Mini version comes in two variants 4K and 128K which is the context length (in tokens) it can support. You can browse these models in the Model Catalog in the Azure AI Studio, filtering by the `chat-completion` task. In this example, we use the `Phi-3-mini-4k-instruct` model. If you have opened this notebook for a different model, replace the model name and version accordingly.\n",
+ "`Ministral-3B` is a 3B-parameter, lightweight, state-of-the-art model. The model belongs to the Mistral model family. You can browse these models in the Model Catalog in the Azure AI Studio, filtering by the `chat-completion` task. In this example, we use the `Ministral-3B` model. If you have opened this notebook for a different model, replace the model name and version accordingly.\n",
  "\n",
  "Note the model id property of the model. This will be passed as input to the fine tuning job. This is also available as the `Asset ID` field in model details page in Azure AI Studio Model Catalog."
  ]
@@ -121,7 +121,7 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "model_name = \"Phi-3-mini-4k-instruct\" # \"Meta-Llama-3.1-8B-Instruct\"\n",
+ "model_name = \"Ministral-3B\"\n",
  "foundation_model = registry_ml_client.models.get(model_name, label=\"latest\")\n",
  "print(\n",
  "    \"\\n\\nUsing model name: {0}, version: {1}, id: {2} for fine tuning\".format(\n",
@@ -139,7 +139,7 @@
  "from azure.ai.ml.constants._common import AssetTypes\n",
  "from azure.ai.ml.entities._inputs_outputs import Input\n",
  "\n",
- "model_to_finetune = Input(type=AssetTypes.MLFLOW_MODEL, path=foundation_model.id)"
+ "model_to_finetune = Input(type=AssetTypes.CUSTOM_MODEL, path=foundation_model.id)"
  ]
  },
  {
@@ -401,7 +401,7 @@
  "    validation_data=validation_data,\n",
  "    hyperparameters={\n",
  "        \"per_device_train_batch_size\": \"1\",\n",
- "        \"learning_rate\": \"0.00002\",\n",
+ "        \"learning_rate\": \"0.0001\",\n",
  "        \"num_train_epochs\": \"1\",\n",
  "    },\n",
  "    model=model_to_finetune,\n",
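
For review convenience, here is a minimal end-to-end sketch of the notebook flow after this diff is applied: the `azureml-mistral` registry client, the `Ministral-3B` lookup, the `CUSTOM_MODEL` input (Ministral-3B is consumed as a custom rather than MLflow asset, which is presumably why the asset type changes), and the raised learning rate. The `CustomModelFineTuningJob` import path, the `"ChatCompletion"` task string, and all `<...>` placeholders are assumptions not shown in this diff; the notebook cells remain the source of truth.

```python
# Sketch only: mirrors the state of the notebook after this diff, with assumptions flagged.
from azure.ai.ml import MLClient
from azure.ai.ml.constants._common import AssetTypes
from azure.ai.ml.entities._inputs_outputs import Input
from azure.identity import DefaultAzureCredential

credential = DefaultAzureCredential()

# Workspace client; subscription/resource group/workspace names are placeholders.
workspace_ml_client = MLClient(
    credential,
    subscription_id="<SUBSCRIPTION_ID>",
    resource_group_name="<RESOURCE_GROUP>",
    workspace_name="<WORKSPACE_NAME>",
)

# Mistral-family models live in the "azureml-mistral" system registry (per this diff).
registry_ml_client = MLClient(credential, registry_name="azureml-mistral")

model_name = "Ministral-3B"
foundation_model = registry_ml_client.models.get(model_name, label="latest")

# CUSTOM_MODEL, not MLFLOW_MODEL, per the @@ -139 hunk above.
model_to_finetune = Input(type=AssetTypes.CUSTOM_MODEL, path=foundation_model.id)

# ASSUMPTION: the job entity below matches the constructor whose keyword arguments
# appear in the @@ -401 hunk; verify the exact import against the notebook / SDK version.
from azure.ai.ml.entities import CustomModelFineTuningJob

finetuning_job = CustomModelFineTuningJob(
    task="ChatCompletion",  # assumed task identifier; match the notebook's value
    model=model_to_finetune,
    training_data=Input(type=AssetTypes.URI_FILE, path="<train_sft.jsonl>"),  # placeholder
    validation_data=Input(type=AssetTypes.URI_FILE, path="<test_sft.jsonl>"),  # placeholder
    hyperparameters={
        "per_device_train_batch_size": "1",
        "learning_rate": "0.0001",  # raised from 0.00002 by this diff
        "num_train_epochs": "1",
    },
    name="ministral-3b-ultrachat-ft",  # placeholder
    experiment_name="ft-ministral-3b",  # placeholder
)

returned_job = workspace_ml_client.jobs.create_or_update(finetuning_job)
print(f"Submitted fine-tuning job: {returned_job.name}")
```

On the learning-rate bump: 2e-5 is a common default for full fine-tuning, while values around 1e-4 are typical for parameter-efficient (LoRA-style) training, so the new value is plausible if the service trains adapters; this is an inference, not something the diff states.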