|
|
import gradio as gr

from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError
from transformers import AutoConfig, AutoModel
|
|
|
|
|
def check_model_access(model_id: str, oauth_token: gr.OAuthToken | None) -> str:
    """
    Checks if the logged-in user can load the configuration for a given model on the Hub.

    Loading the config proves file-level access to the repository without
    downloading the model weights.

    Args:
        model_id: The ID of the model to check (e.g., "meta-llama/Llama-2-7b-chat-hf").
        oauth_token: The user's OAuth token, automatically injected by Gradio.

    Returns:
        A string with the result of the access check, formatted for Markdown.
    """
    # Guard clause: Gradio passes None when the user has not logged in.
    if oauth_token is None:
        return "### <span style='color: red;'>Authentication Error 🔴</span>\nPlease log in using the button above to check model access."

    # Guard clause: empty/blank textbox.
    if not model_id:
        return "### <span style='color: orange;'>Input Missing 🟡</span>\nPlease enter a model ID to check."

    try:
        # Load ONLY the config, never the weights: AutoModel.from_pretrained
        # would download gigabytes of files just to prove access, while
        # fetching config.json is a single small request with the same
        # authorization semantics.
        config = AutoConfig.from_pretrained(model_id, token=oauth_token.token)

        # NOTE: the OAuth token must never be echoed back into the UI output —
        # it is a secret credential.
        return f"""
### <span style='color: green;'>Access Granted ✅</span>
Successfully loaded the configuration for **{model_id}**.

- **Model Type:** `{config.model_type}`
- **Architecture:** `{getattr(config, "architectures", None)}`
"""

    except RepositoryNotFoundError:
        # Raised when the repo does not exist (or is hidden from this user).
        return f"### <span style='color: red;'>Not Found 🔴</span>\nThe repository **{model_id}** does not exist."

    except HfHubHTTPError as e:
        # 401/403 indicate the repo exists but this token lacks permission
        # (gated model terms not accepted, or a private repository).
        if e.response.status_code in [401, 403]:
            return f"""
### <span style='color: red;'>Access Denied 🔴</span>
You do not have permission to download files from **{model_id}**.

- Please ensure you have accepted the terms and conditions on the model's page.
- This might be a private model you don't have access to.
- **Status Code:** {e.response.status_code}
"""
        else:
            # Any other HTTP failure (rate limiting, server error, ...).
            return f"### <span style='color: red;'>An Error Occurred 🔴</span>\n**Details:** {str(e)}"
|
|
|
|
|
|
|
|
with gr.Blocks(css="h1 { text-align: center; }") as demo:
    # Page header and usage hint.
    gr.Markdown("# Gated Model Access Tester")
    gr.Markdown(
        "Log in with your Hugging Face account and enter a model ID. "
        "This will attempt to load the model's `config.json` file to verify access."
    )

    # OAuth login; the resulting token is injected into check_model_access
    # by Gradio via its gr.OAuthToken-annotated parameter.
    gr.LoginButton()

    with gr.Row():
        model_box = gr.Textbox(
            label="Model ID",
            placeholder="e.g., meta-llama/Llama-2-7b-chat-hf",
            scale=3,
        )
        run_btn = gr.Button("Check Access", variant="primary", scale=1)

    output_md = gr.Markdown("### Result will be displayed here.")

    # Only the textbox is wired as an input; the OAuth token arrives implicitly.
    run_btn.click(fn=check_model_access, inputs=[model_box], outputs=[output_md])

demo.launch()