From e8bf3562732cc885a8964a6944b1cfd68d71cbbf Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 08:21:21 -0800
Subject: [PATCH 01/17] changes

---
 gradio/external.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index 463e32aa4a29e..2c7f3c7978c7c 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -36,6 +36,7 @@
     from gradio.blocks import Blocks
     from gradio.chat_interface import ChatInterface
     from gradio.components.chatbot import MessageDict
+    from gradio.components.login_button import LoginButton
     from gradio.interface import Interface
 
 
@@ -47,7 +48,7 @@ def load(
     | None = None,
     token: str | None = None,
     hf_token: str | None = None,
-    accept_token: bool = False,
+    accept_token: bool | LoginButton = False,
     provider: PROVIDER_T | None = None,
     **kwargs,
 ) -> Blocks:
@@ -57,7 +58,7 @@
         name: the name of the model (e.g. "google/vit-base-patch16-224") or Space (e.g. "flax-community/spanish-gpt2"). This is the first parameter passed into the `src` function. Can also be formatted as {src}/{repo name} (e.g. "models/google/vit-base-patch16-224") if `src` is not provided.
         src: function that accepts a string model `name` and a string or None `token` and returns a Gradio app. Alternatively, this parameter takes one of two strings for convenience: "models" (for loading a Hugging Face model through the Inference API) or "spaces" (for loading a Hugging Face Space). If None, uses the prefix of the `name` parameter to determine `src`.
         token: optional token that is passed as the second parameter to the `src` function. If not explicitly provided, will use the HF_TOKEN environment variable or fallback to the locally-saved HF token when loading models but not Spaces (when loading Spaces, only provide a token if you are loading a trusted private Space as the token can be read by the Space you are loading). Find your HF tokens here: https://huggingface.co/settings/tokens.
-        accept_token: if True, a Textbox component is first rendered to allow the user to provide a token, which will be used instead of the `token` parameter when calling the loaded model or Space.
+        accept_token: if True, a Textbox component is first rendered to allow the user to provide a token, which will be used instead of the `token` parameter when calling the loaded model or Space. If LoginButton, a LoginButton component is rendered, which allows the user to login with a Hugging Face account whose token will be used instead of the `token` parameter when calling the loaded model or Space.
         kwargs: additional keyword parameters to pass into the `src` function. If `src` is "models" or "Spaces", these parameters are passed into the `gr.Interface` or `gr.ChatInterface` constructor.
         provider: the name of the third-party (non-Hugging Face) providers to use for model inference (e.g. "replicate", "sambanova", "fal-ai", etc). Should be one of the providers supported by `huggingface_hub.InferenceClient`. This parameter is only used when `src` is "models"
     Returns:
@@ -67,6 +68,8 @@ def load(
         demo = gr.load("gradio/question-answering", src="spaces")
         demo.launch()
     """
+    import gradio as gr
+
     if hf_token is not None and token is None:
         token = hf_token
         warnings.warn(
@@ -93,15 +96,21 @@ def load(
     ):
         token = os.environ.get("HF_TOKEN")
 
+    if isinstance(src, Callable):
+        return src(name, token, **kwargs)
+
     if not accept_token:
-        if isinstance(src, Callable):
-            return src(name, token, **kwargs)
         return load_blocks_from_huggingface(
             name=name, src=src, hf_token=token, provider=provider, **kwargs
         )
+    elif isinstance(accept_token, gr.LoginButton):
+        with gr.Blocks(fill_height=True) as demo:
+            accept_token.render()
+            load_blocks_from_huggingface(
+                name=name, src=src, hf_token=token, provider=provider, **kwargs
+            )
+        return demo
     else:
-        import gradio as gr
-
         with gr.Blocks(fill_height=True) as demo:
            with gr.Accordion("Enter your token and press enter") as accordion:
                textbox = gr.Textbox(
@@ -140,8 +149,6 @@ def load_token(token_value):
 
         @gr.render(inputs=[textbox], triggers=[textbox.submit])
         def create(token_value):
-            if isinstance(src, Callable):
-                return src(name, token_value, **kwargs)
             return load_blocks_from_huggingface(
                 name=name, src=src, hf_token=token_value, **kwargs
             )
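
A quick usage note on the patch above. This is only a sketch based on the new docstring: the model name is illustrative, and the `gr.LoginButton` OAuth flow generally completes only when the app runs on a Hugging Face Space with OAuth enabled.

    import gradio as gr

    # accept_token=True keeps the existing behavior: a Textbox first asks the
    # visitor for an HF token, then the model is loaded with it. Passing a
    # LoginButton instead (new in this patch) renders a "Sign in with
    # Hugging Face" button and uses the signed-in account's token.
    demo = gr.load(
        "models/google/vit-base-patch16-224",  # illustrative model name
        accept_token=gr.LoginButton(),
    )
    demo.launch()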
From a1aba918049e780277979d023fe2f0f6525cf3be Mon Sep 17 00:00:00 2001
From: gradio-pr-bot
Date: Wed, 12 Feb 2025 16:22:50 +0000
Subject: [PATCH 02/17] add changeset

---
 .changeset/four-wasps-hug.md | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 .changeset/four-wasps-hug.md

diff --git a/.changeset/four-wasps-hug.md b/.changeset/four-wasps-hug.md
new file mode 100644
index 0000000000000..8f15d63e26751
--- /dev/null
+++ b/.changeset/four-wasps-hug.md
@@ -0,0 +1,5 @@
+---
+"gradio": minor
+---
+
+feat:Support `gr.LoginButton` for `gr.load()`
From 57c66e036f887202967bd6533744b95577a8f9c4 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 11:46:17 -0800
Subject: [PATCH 03/17] tests

---
 gradio/external.py | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index 2c7f3c7978c7c..9018e3c4083fe 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -20,7 +20,7 @@
 from gradio_client.utils import encode_url_or_file_to_base64
 from packaging import version
 
-import gradio
+import gradio as gr
 from gradio import components, external_utils, utils
 from gradio.components.multimodal_textbox import MultimodalValue
 from gradio.context import Context
@@ -105,10 +105,16 @@ def load(
         )
     elif isinstance(accept_token, gr.LoginButton):
         with gr.Blocks(fill_height=True) as demo:
-            accept_token.render()
-            load_blocks_from_huggingface(
-                name=name, src=src, hf_token=token, provider=provider, **kwargs
-            )
+            if not accept_token.is_rendered:
+                accept_token.render()
+
+            @gr.render(triggers=[demo.load])
+            def create_blocks(oauth_token: gr.OAuthToken | None):
+                token_value = "" if oauth_token is None else oauth_token.token
+                return load_blocks_from_huggingface(
+                    name=name, src=src, hf_token=token_value, **kwargs
+                )
+
         return demo
     else:
         with gr.Blocks(fill_height=True) as demo:
@@ -180,7 +186,7 @@ def load_blocks_from_huggingface(
 
     if src == "spaces" and hf_token is None:
         hf_token = False  # Since Spaces can read the token, we don't want to pass it in unless the user explicitly provides it
-    blocks: gradio.Blocks = factory_methods[src](
+    blocks: gr.Blocks = factory_methods[src](
         name, hf_token=hf_token, alias=alias, provider=provider, **kwargs
     )
     return blocks
@@ -471,7 +477,7 @@ def query_huggingface_inference_endpoints(*data):
     }
 
     kwargs = dict(interface_info, **kwargs)
-    interface = gradio.Interface(**kwargs)
+    interface = gr.Interface(**kwargs)
     return interface
 
 
@@ -567,7 +573,7 @@ def from_spaces_blocks(space: str, hf_token: str | None | Literal[False]) -> Blo
             predict_fns.append(endpoint.make_end_to_end_fn(helper))
         else:
             predict_fns.append(None)
-    return gradio.Blocks.from_config(client.config, predict_fns, client.src)  # type: ignore
+    return gr.Blocks.from_config(client.config, predict_fns, client.src)  # type: ignore
 
 
 def from_spaces_interface(
@@ -612,7 +618,7 @@ def fn(*data):
 
     kwargs = dict(config, **kwargs)
     kwargs["_api_mode"] = True
-    interface = gradio.Interface(**kwargs)
+    interface = gr.Interface(**kwargs)
     return interface
 
 
@@ -841,7 +847,7 @@ def open_api_stream(
         open_api_stream if streaming else open_api,
         type="messages",
         multimodal=bool(file_types),
-        textbox=gradio.MultimodalTextbox(file_types=supported_extensions)
+        textbox=gr.MultimodalTextbox(file_types=supported_extensions)
         if file_types
         else None,
         **kwargs,

From 926995ac30dc38c4782b02993611e224e1a9c770 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 11:57:10 -0800
Subject: [PATCH 04/17] changes

---
 gradio/external.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradio/external.py b/gradio/external.py
index 9018e3c4083fe..a50293487a384 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -110,7 +110,7 @@ def load(
 
             @gr.render(triggers=[demo.load])
             def create_blocks(oauth_token: gr.OAuthToken | None):
-                token_value = "" if oauth_token is None else oauth_token.token
+                token_value = None if oauth_token is None else oauth_token.token
                 return load_blocks_from_huggingface(
                     name=name, src=src, hf_token=token_value, **kwargs
                 )
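
The rewrite in patches 03 and 04 leans on Gradio's OAuth injection: a parameter annotated `gr.OAuthToken | None` is filled in automatically with the visitor's token (or None when logged out) each time the `@gr.render` trigger fires, so the inner Blocks are rebuilt per visitor. A standalone sketch of the same pattern outside `gr.load()` (the Markdown text is illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        gr.LoginButton()

        # oauth_token is injected by Gradio because of its type annotation;
        # it is None until the visitor logs in.
        @gr.render(triggers=[demo.load])
        def show_status(oauth_token: gr.OAuthToken | None):
            if oauth_token is None:
                gr.Markdown("Logged out: the model would be loaded with no token.")
            else:
                gr.Markdown("Logged in: requests can use your account's token.")

    demo.launch()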
From db222d92148cdab8b855cf846a727fbcee1a40e3 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 12:43:46 -0800
Subject: [PATCH 05/17] type

---
 gradio/external.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index a50293487a384..83e7e198465cc 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -804,7 +804,7 @@ def open_api(message: str | MultimodalValue, history: list | None) -> str | None
         history = history or start_message
         if len(history) > 0 and isinstance(history[0], (list, tuple)):
             history = ChatInterface._tuples_to_messages(history)
-        conversation = format_conversation(history, message)
+        conversation = format_conversation(history, message)  # type: ignore
         return (
             client.chat.completions.create(
                 model=model,
@@ -820,7 +820,7 @@ def open_api_stream(
         history = history or start_message
         if len(history) > 0 and isinstance(history[0], (list, tuple)):
             history = ChatInterface._tuples_to_messages(history)
-        conversation = format_conversation(history, message)
+        conversation = format_conversation(history, message)  # type: ignore
         stream = client.chat.completions.create(
             model=model,
             messages=conversation,  # type: ignore

From 96ad50d639d0a8fd9dd53684aedec8729361ab01 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:09:11 -0800
Subject: [PATCH 06/17] changes

---
 gradio/external.py | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index 83e7e198465cc..8a0c35d893339 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -165,43 +165,40 @@ def create(token_value):
 def load_blocks_from_huggingface(
     name: str,
     src: str,
-    hf_token: str | Literal[False] | None = None,
+    hf_token: str | None = None,
     alias: str | None = None,
     provider: PROVIDER_T | None = None,
     **kwargs,
 ) -> Blocks:
     """Creates and returns a Blocks instance from a Hugging Face model or Space repo."""
-    factory_methods: dict[str, Callable] = {
-        # for each repo type, we have a method that returns the Interface given the model name & optionally an hf_token
-        "huggingface": from_model,
-        "models": from_model,
-        "spaces": from_spaces,
-    }
-    if hf_token is not None and hf_token is not False:
+    if hf_token is not None:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
             )
         Context.hf_token = hf_token
 
-    if src == "spaces" and hf_token is None:
-        hf_token = False  # Since Spaces can read the token, we don't want to pass it in unless the user explicitly provides it
-    blocks: gr.Blocks = factory_methods[src](
-        name, hf_token=hf_token, alias=alias, provider=provider, **kwargs
-    )
+    if src == "spaces":
+        # Spaces can read the token, so we don't want to pass it in unless the user explicitly provides it
+        token = False if hf_token is None else hf_token
+        blocks = from_spaces(
+            name, hf_token=token, alias=alias, provider=provider, **kwargs
+        )
+    else:
+        blocks = from_model(
+            name, hf_token=hf_token, alias=alias, provider=provider, **kwargs
+        )
     return blocks
 
 
 def from_model(
     model_name: str,
-    hf_token: str | Literal[False] | None,
+    hf_token: str | None,
     alias: str | None,
     provider: PROVIDER_T | None = None,
     **kwargs,
 ) -> Blocks:
     headers = {"X-Wait-For-Model": "true"}
-    if hf_token is False:
-        hf_token = None
     client = huggingface_hub.InferenceClient(
         model=model_name, headers=headers, token=hf_token, provider=provider
     )
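
One subtlety in the patch 06 refactor: for `huggingface_hub`, `None` and `False` are not equivalent (with `None` the client may fall back to the locally-saved token). A hypothetical helper, not part of the patch, just to spell out the rule being preserved for Spaces:

    # A Space can read any token it receives, so an unspecified token is sent
    # as False ("send nothing") rather than None ("fall back to the saved token").
    def space_token(hf_token: str | None) -> str | bool:
        return False if hf_token is None else hf_token

    assert space_token(None) is False
    assert space_token("hf_xxx") == "hf_xxx"  # dummy token string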
From 97623cc5936e75f1b2ec2413d59faeede23aef44 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:10:17 -0800
Subject: [PATCH 07/17] format

---
 gradio/external.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradio/external.py b/gradio/external.py
index 8a0c35d893339..31c078318042d 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -58,7 +58,7 @@ def load(
         name: the name of the model (e.g. "google/vit-base-patch16-224") or Space (e.g. "flax-community/spanish-gpt2"). This is the first parameter passed into the `src` function. Can also be formatted as {src}/{repo name} (e.g. "models/google/vit-base-patch16-224") if `src` is not provided.
         src: function that accepts a string model `name` and a string or None `token` and returns a Gradio app. Alternatively, this parameter takes one of two strings for convenience: "models" (for loading a Hugging Face model through the Inference API) or "spaces" (for loading a Hugging Face Space). If None, uses the prefix of the `name` parameter to determine `src`.
         token: optional token that is passed as the second parameter to the `src` function. If not explicitly provided, will use the HF_TOKEN environment variable or fallback to the locally-saved HF token when loading models but not Spaces (when loading Spaces, only provide a token if you are loading a trusted private Space as the token can be read by the Space you are loading). Find your HF tokens here: https://huggingface.co/settings/tokens.
-        accept_token: if True, a Textbox component is first rendered to allow the user to provide a token, which will be used instead of the `token` parameter when calling the loaded model or Space. If LoginButton, a LoginButton component is rendered, which allows the user to login with a Hugging Face account whose token will be used instead of the `token` parameter when calling the loaded model or Space.
+        accept_token: if True, a Textbox component is first rendered to allow the user to provide a token, which will be used instead of the `token` parameter when calling the loaded model or Space. Can also provide an instance of a gr.LoginButton in the same Blocks scope, which allows the user to login with a Hugging Face account whose token will be used instead of the `token` parameter when calling the loaded model or Space.
         kwargs: additional keyword parameters to pass into the `src` function. If `src` is "models" or "Spaces", these parameters are passed into the `gr.Interface` or `gr.ChatInterface` constructor.
         provider: the name of the third-party (non-Hugging Face) providers to use for model inference (e.g. "replicate", "sambanova", "fal-ai", etc). Should be one of the providers supported by `huggingface_hub.InferenceClient`. This parameter is only used when `src` is "models"
     Returns:

From 716cfa8d30dd20be899488d5553dddddc30c86a8 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:17:57 -0800
Subject: [PATCH 08/17] updates

---
 gradio/external.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/gradio/external.py b/gradio/external.py
index 31c078318042d..19ff32a05d346 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -297,6 +297,7 @@ def custom_post_binary(data):
                 "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."
             ]
         ]
+        postprocess = lambda x: x.summary_text  # noqa: E731
         fn = client.summarization
     # Example: distilbert-base-uncased-finetuned-sst-2-english
     elif p == "text-classification":
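
The `postprocess` added in patch 08 is needed because `InferenceClient.summarization` returns a structured output object rather than a plain string, while the generated Interface expects a string. A sketch of the unwrapping it performs (the model name here is illustrative):

    from huggingface_hub import InferenceClient

    client = InferenceClient(model="facebook/bart-large-cnn")  # illustrative
    result = client.summarization("The tower is 324 metres (1,063 ft) tall ...")
    print(result.summary_text)  # the plain string the Interface displays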
From 7ab09f1608fc52c9cbd5e78c4839c9b4825747ec Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:21:00 -0800
Subject: [PATCH 09/17] testing

---
 gradio/external.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index 19ff32a05d346..0e2a87e4932c2 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -68,8 +68,6 @@ def load(
         demo = gr.load("gradio/question-answering", src="spaces")
         demo.launch()
     """
-    import gradio as gr
-
     if hf_token is not None and token is None:
         token = hf_token
         warnings.warn(
@@ -111,6 +109,7 @@ def load(
             @gr.render(triggers=[demo.load])
             def create_blocks(oauth_token: gr.OAuthToken | None):
                 token_value = None if oauth_token is None else oauth_token.token
+                gr.Markdown(token_value)
                 return load_blocks_from_huggingface(
                     name=name, src=src, hf_token=token_value, **kwargs
                 )

From 4d9f1aebc0c74a1df7dd60a9981eed910f756fe6 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:21:27 -0800
Subject: [PATCH 10/17] changes

---
 gradio/external.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/gradio/external.py b/gradio/external.py
index 0e2a87e4932c2..5b296a07881a0 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -109,6 +109,7 @@ def load(
             @gr.render(triggers=[demo.load])
             def create_blocks(oauth_token: gr.OAuthToken | None):
                 token_value = None if oauth_token is None else oauth_token.token
+                gr.Markdown("token>>>>>")
                 gr.Markdown(token_value)
                 return load_blocks_from_huggingface(
                     name=name, src=src, hf_token=token_value, **kwargs

From cc84e24c2d60e2d99631aab09b96e50727cb81b1 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 14:51:02 -0800
Subject: [PATCH 11/17] changes

---
 gradio/external.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/gradio/external.py b/gradio/external.py
index 5b296a07881a0..526482b398f9e 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -199,6 +199,7 @@ def from_model(
     **kwargs,
 ) -> Blocks:
     headers = {"X-Wait-For-Model": "true"}
+    print("hf_token", hf_token)
     client = huggingface_hub.InferenceClient(
         model=model_name, headers=headers, token=hf_token, provider=provider
     )
@@ -472,6 +473,7 @@ def query_huggingface_inference_endpoints(*data):
         "outputs": outputs,
         "title": model_name,
         "examples": examples,
+        "cache_mode": "lazy",
     }
 
     kwargs = dict(interface_info, **kwargs)

From 793d337ad85ed389b77a86807317d175835077fa Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 15:08:00 -0800
Subject: [PATCH 12/17] cache false

---
 gradio/external.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gradio/external.py b/gradio/external.py
index 526482b398f9e..7882f9acb4ec3 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -473,7 +473,7 @@ def query_huggingface_inference_endpoints(*data):
         "outputs": outputs,
         "title": model_name,
         "examples": examples,
-        "cache_mode": "lazy",
+        "cache_examples": False,
     }
 
     kwargs = dict(interface_info, **kwargs)
From c2a86db3d10441fb9eb1d1efacff07b0f2cdff2d Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Wed, 12 Feb 2025 17:09:50 -0800
Subject: [PATCH 13/17] prints

---
 gradio/external.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/gradio/external.py b/gradio/external.py
index 7882f9acb4ec3..e241dc8871ff9 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -203,6 +203,7 @@ def from_model(
     client = huggingface_hub.InferenceClient(
         model=model_name, headers=headers, token=hf_token, provider=provider
     )
+    print("client", client)
     p, tags = external_utils.get_model_info(model_name, hf_token)
 
     # For tasks that are not yet supported by the InferenceClient
@@ -454,15 +455,19 @@ def custom_post_binary(data):
         raise ValueError(f"Unsupported pipeline type: {p}")
 
     def query_huggingface_inference_endpoints(*data):
+        print("data", data)
         if preprocess is not None:
             data = preprocess(*data)
+            print("data after preprocess", data)
         try:
             data = fn(*data)  # type: ignore
+            print("data after fn", data)
         except huggingface_hub.utils.HfHubHTTPError as e:  # type: ignore
             if "429" in str(e):
                 raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
+            print("data after postprocess", data)
         return data
 
     query_huggingface_inference_endpoints.__name__ = alias or model_name

From 3ce3d7bf6153a0dedbeb81f66e70794e43cb85e2 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Tue, 25 Feb 2025 15:09:04 -0800
Subject: [PATCH 14/17] changes

---
 gradio/external.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index b7eb9c992a538..bbc94292e6353 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -109,8 +109,6 @@ def load(
             @gr.render(triggers=[demo.load])
             def create_blocks(oauth_token: gr.OAuthToken | None):
                 token_value = None if oauth_token is None else oauth_token.token
-                gr.Markdown("token>>>>>")
-                gr.Markdown(token_value)
                 return load_blocks_from_huggingface(
                     name=name, src=src, hf_token=token_value, **kwargs
                 )
@@ -199,11 +197,9 @@ def from_model(
     **kwargs,
 ) -> Blocks:
     headers = {"X-Wait-For-Model": "true"}
-    print("hf_token", hf_token)
     client = huggingface_hub.InferenceClient(
         model=model_name, headers=headers, token=hf_token, provider=provider
     )
-    print("client", client)
     p, tags = external_utils.get_model_info(model_name, hf_token)
 
     # For tasks that are not yet supported by the InferenceClient

From 987868acc305584ee4545abc55cd4d0cd18f344b Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Tue, 25 Feb 2025 15:26:27 -0800
Subject: [PATCH 15/17] add providers

---
 gradio/external.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index bbc94292e6353..e1dd4e471af9f 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -110,7 +110,7 @@ def load(
             def create_blocks(oauth_token: gr.OAuthToken | None):
                 token_value = None if oauth_token is None else oauth_token.token
                 return load_blocks_from_huggingface(
-                    name=name, src=src, hf_token=token_value, **kwargs
+                    name=name, src=src, hf_token=token_value, provider=provider, **kwargs
                 )
 
         return demo
@@ -154,7 +154,7 @@ def load_token(token_value):
         @gr.render(inputs=[textbox], triggers=[textbox.submit])
         def create(token_value):
             return load_blocks_from_huggingface(
-                name=name, src=src, hf_token=token_value, **kwargs
+                name=name, src=src, hf_token=token_value, provider=provider, **kwargs
             )
 
         return demo
From dd5e432a6fb0867b9cae4df6b2d64ae7a535408f Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Tue, 25 Feb 2025 15:26:43 -0800
Subject: [PATCH 16/17] format

---
 gradio/external.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index e1dd4e471af9f..acfc8084f6bff 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -110,7 +110,11 @@ def load(
             def create_blocks(oauth_token: gr.OAuthToken | None):
                 token_value = None if oauth_token is None else oauth_token.token
                 return load_blocks_from_huggingface(
-                    name=name, src=src, hf_token=token_value, provider=provider, **kwargs
+                    name=name,
+                    src=src,
+                    hf_token=token_value,
+                    provider=provider,
+                    **kwargs,
                 )
 
         return demo
@@ -154,7 +158,11 @@ def load_token(token_value):
         @gr.render(inputs=[textbox], triggers=[textbox.submit])
         def create(token_value):
             return load_blocks_from_huggingface(
-                name=name, src=src, hf_token=token_value, provider=provider, **kwargs
+                name=name,
+                src=src,
+                hf_token=token_value,
+                provider=provider,
+                **kwargs,
             )
 
         return demo

From f3a8f182439fd8a779a7fd93433c5b34f8f78bd3 Mon Sep 17 00:00:00 2001
From: Abubakar Abid
Date: Tue, 25 Feb 2025 15:27:31 -0800
Subject: [PATCH 17/17] changes

---
 gradio/external.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/gradio/external.py b/gradio/external.py
index acfc8084f6bff..707fcff29ae50 100644
--- a/gradio/external.py
+++ b/gradio/external.py
@@ -464,19 +464,15 @@ def custom_post_binary(data):
         raise ValueError(f"Unsupported pipeline type: {p}")
 
     def query_huggingface_inference_endpoints(*data):
-        print("data", data)
         if preprocess is not None:
             data = preprocess(*data)
-            print("data after preprocess", data)
         try:
             data = fn(*data)  # type: ignore
-            print("data after fn", data)
         except huggingface_hub.utils.HfHubHTTPError as e:  # type: ignore
             if "429" in str(e):
                 raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data)  # type: ignore
-            print("data after postprocess", data)
         return data
 
     query_huggingface_inference_endpoints.__name__ = alias or model_name
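
Taken together, the series implements the changeset from patch 02: `gr.load()` can now defer authentication to a `gr.LoginButton`, threading each visitor's OAuth token (and, after patch 15, the `provider` argument) through to the loaded model. A closing sketch of the intended end-to-end usage (model and provider are illustrative; OAuth login generally requires running on a Hugging Face Space with OAuth enabled):

    import gradio as gr

    # Each visitor signs in with their own Hugging Face account, and the loaded
    # model is then queried with that visitor's token rather than the app owner's.
    demo = gr.load(
        "models/google/vit-base-patch16-224",  # illustrative
        accept_token=gr.LoginButton(),
        provider="fal-ai",  # illustrative; any provider supported by huggingface_hub.InferenceClient
    )
    demo.launch()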