import json
import textwrap

import gradio as gr
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import GatedRepoError, HfHubHTTPError, RepositoryNotFoundError
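
# Note: OAuth login assumes this runs as a Space with `hf_oauth: true` in the
# README metadata; without it, `oauth_token` will be None for every request.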

def check_model_access(model_id: str, oauth_token: gr.OAuthToken | None):
    """
    Checks whether the logged-in user can download the configuration of a given model on the Hub.
    Downloading config.json proves file-level access to the repository.

    Args:
        model_id: The ID of the model to check (e.g., "meta-llama/Llama-2-7b-chat-hf").
        oauth_token: The user's OAuth token, automatically injected by Gradio.
    
    Returns:
        A string with the result of the access check, formatted for Markdown.
    """
    # 1. Check if the user is logged in
    if oauth_token is None:
        return "### <span style='color: red;'>Authentication Error πŸ”΄</span>\nPlease log in using the button above to check model access."

    # 2. Check if a model ID was provided
    if not model_id:
        return "### <span style='color: orange;'>Input Missing 🟑</span>\nPlease enter a model ID to check."

    try:
        # 3. The core test: try to download the model's config.json using the user's token.
        # This will fail if the user doesn't have access to the gated repo's files.
        config_path = hf_hub_download(repo_id=model_id, filename="config.json", token=oauth_token.token)
        with open(config_path) as f:
            config = json.load(f)

        # 4. If the download succeeds, format a success message.
        # The OAuth token itself is deliberately never echoed back to the UI.
        return textwrap.dedent(f"""
            ### <span style='color: green;'>Access Granted ✅</span>
            Successfully loaded the configuration for **{model_id}**.

            - **Model Type:** `{config.get("model_type", "unknown")}`
            - **Architectures:** `{", ".join(config.get("architectures") or ["unknown"])}`
            """)
    
    except GatedRepoError as e:
        # 5. Handle gated repos the user has not been granted access to. GatedRepoError
        # subclasses RepositoryNotFoundError, so it must be caught first.
        return textwrap.dedent(f"""
            ### <span style='color: red;'>Access Denied 🔴</span>
            You do not have permission to download files from **{model_id}**.

            - Please ensure you have accepted the terms and conditions on the model's page.
            - **Status Code:** {e.response.status_code}
            """)

    except RepositoryNotFoundError:
        # 6. Handle repositories that do not exist (or private repos hidden from this user).
        return f"### <span style='color: red;'>Not Found 🔴</span>\nThe repository **{model_id}** does not exist, or it is private and hidden from your account."

    except HfHubHTTPError as e:
        # 7. Handle any other HTTP error returned by the Hub (e.g. an expired or invalid token).
        return f"### <span style='color: red;'>An Error Occurred 🔴</span>\n**Details:** {e}"

# --- Gradio Interface ---
with gr.Blocks(css="h1 { text-align: center; }") as demo:
    gr.Markdown("# Gated Model Access Tester")
    gr.Markdown("Log in with your Hugging Face account and enter a model ID. This will attempt to load the model's `config.json` file to verify access.")
    
    gr.LoginButton()
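    # Gradio injects the user's gr.OAuthToken into check_model_access automatically,
    # based on its type annotation, so it is not listed in the click handler's inputs.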

    with gr.Row():
        model_id_input = gr.Textbox(
            label="Model ID", 
            placeholder="e.g., meta-llama/Llama-2-7b-chat-hf",
            scale=3,
        )
        check_button = gr.Button("Check Access", variant="primary", scale=1)

    result_display = gr.Markdown("### Result will be displayed here.")

    check_button.click(
        fn=check_model_access,
        inputs=[model_id_input],
        outputs=[result_display]
    )

demo.launch()
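
# Local testing note: outside a Space, recent Gradio versions mock the OAuth flow using
# the Hugging Face token already saved on your machine (e.g. via `huggingface-cli login`);
# exact behavior may vary by Gradio version.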