	Upload 7 files
- .gitattributes +35 -35
- README.md +13 -13
- about.py +83 -0
- app.py +78 -0
- css_html_js.py +105 -0
- images/cheetah.jpg +0 -0
- requirements.txt +14 -0
    	
.gitattributes
CHANGED

@@ -1,35 +1,35 @@
All 35 lines are removed and re-added with identical content (no net change). The file keeps the standard Hugging Face Git LFS rules:

*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
    	
README.md
CHANGED

@@ -1,13 +1,13 @@
----
-title: 
-emoji: 
-colorFrom: gray
-colorTo: 
-sdk: gradio
-sdk_version: 4.
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: SD Defense
+emoji: 📊
+colorFrom: gray
+colorTo: gray
+sdk: gradio
+sdk_version: 4.33.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
    	
about.py
ADDED

from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")

NUM_FEWSHOT = 0  # Change with your few shot
# ---------------------------------------------------


TITLE = """<h1 align="center" id="space-title">Demo of AdvUnlearn</h1>"""

# subtitle
SUB_TITLE = """<h2 align="center" id="space-title">A robust unlearning framework</h2>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
AdvUnlearn is a robust unlearning framework. It aims to enhance the robustness of concept erasing by integrating
the principle of adversarial training (AT) into machine unlearning, while also achieving a balanced trade-off with model utility. For details, please
read the [paper](https://arxiv.org/abs/2405.15234) and check the [code](https://github.com/OPTML-Group/AdvUnlearn).
"""


# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
## How it works

## Reproducibility
To reproduce our results, here are the commands you can run:

"""

EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!
Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill up your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{zhang2023generate,
  title={To Generate or Not? Safety-Driven Unlearned Diffusion Models Are Still Easy To Generate Unsafe Images... For Now},
  author={Zhang, Yimeng and Jia, Jinghan and Chen, Xin and Chen, Aochuan and Zhang, Yihua and Liu, Jiancheng and Ding, Ke and Liu, Sijia},
  journal={arXiv preprint arXiv:2310.11868},
  year={2023}
}
"""
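For reference, the `Tasks` enum above follows the Hugging Face leaderboard template pattern of wrapping a `Task` dataclass in each enum member. A minimal sketch of how such entries are usually read follows; note that this Space's `app.py` does not actually consume `Tasks`, so the loop is illustrative only.

```python
from about import Tasks

# Each member's .value is a Task dataclass instance, so the benchmark name,
# metric key, and display column name can be pulled out directly.
for task in Tasks:
    print(task.value.benchmark, task.value.metric, task.value.col_name)
```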
    	
app.py
ADDED

import gradio as gr
import os
import requests
import json
import base64
from io import BytesIO
from PIL import Image
from huggingface_hub import login

from css_html_js import custom_css

from about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)

myip = "34.219.98.113"
myport = 8080
is_spaces = True if "SPACE_ID" in os.environ else False
is_shared_ui = False


def process_image_from_binary(img_stream):
    # Decode a base64-encoded image payload into a PIL image.
    if img_stream is None:
        print("no image binary")
        return
    image_data = base64.b64decode(img_stream)
    image_bytes = BytesIO(image_data)
    img = Image.open(image_bytes)

    return img


def generate_img(concept, prompt, seed, steps):
    # Forward the request to the external inference server and decode its response.
    print(f"my IP is {myip}, my port is {myport}")
    response = requests.post('http://{}:{}/generate'.format(myip, myport),
                             json={"concept": concept, "prompt": prompt, "seed": seed, "steps": steps},
                             timeout=(10, 1200))
    print(f"result: {response}")
    image = None
    if response.status_code == 200:
        response_json = response.json()
        print(response_json)
        image = process_image_from_binary(response_json['image'])
    else:
        print(f"Request failed with status code {response.status_code}")

    return image


with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Row() as advlearn:
        with gr.Column():
            # gr.Markdown("Please upload your model id.")
            drop_text = gr.Dropdown(["Object-Church", "Object-Parachute", "Object-Garbage_Truck",
                                     "Style-VanGogh", "Concept-Nudity", "None"],
                                    label="AdvUnlearn Text Encoder")
        with gr.Column():
            text_input = gr.Textbox(label="Prompt")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                seed = gr.Textbox(label="seed", value=666)
            with gr.Row():
                steps = gr.Textbox(label="num_steps", value=100)
            with gr.Row():
                start_button = gr.Button("AdvUnlearn", size='lg')
        with gr.Column(min_width=512):
            result_img = gr.Image(label="Image Generated by AdvUnlearn", width=512, show_share_button=False, show_download_button=False)

    start_button.click(fn=generate_img, inputs=[drop_text, text_input, seed, steps], outputs=result_img, api_name="generate")

demo.launch()
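`generate_img` above relies on an external inference server that is not part of this commit: it POSTs `concept`, `prompt`, `seed`, and `steps` as JSON to `http://<myip>:<myport>/generate` and expects a JSON response whose `image` field holds a base64-encoded image. A minimal sketch of a server satisfying that contract is shown below; it is hypothetical (Flask is not in requirements.txt, and the real AdvUnlearn pipeline is replaced by a placeholder image).

```python
import base64
import io

from flask import Flask, jsonify, request  # hypothetical backend dependency
from PIL import Image

app = Flask(__name__)

@app.route("/generate", methods=["POST"])
def generate():
    payload = request.get_json()
    concept = payload["concept"]      # e.g. "Object-Church"
    prompt = payload["prompt"]
    seed = int(payload["seed"])
    steps = int(payload["steps"])

    # Placeholder: the real server would run the AdvUnlearn diffusion pipeline
    # with the selected text encoder and return the generated image.
    img = Image.new("RGB", (512, 512))

    # Encode the image the way app.py expects: base64 bytes under the "image" key.
    buf = io.BytesIO()
    img.save(buf, format="PNG")
    encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
    return jsonify({"image": encoded})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)
```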
    	
css_html_js.py
ADDED

custom_css = """

.markdown-text {
    font-size: 16px !important;
}

#models-to-add-text {
    font-size: 18px !important;
}

#citation-button span {
    font-size: 16px !important;
}

#citation-button textarea {
    font-size: 16px !important;
}

#citation-button > label > button {
    margin: 6px;
    transform: scale(1.3);
}

#leaderboard-table {
    margin-top: 15px
}

#leaderboard-table-lite {
    margin-top: 15px
}

#search-bar-table-box > div:first-child {
    background: none;
    border: none;
}

#search-bar {
    padding: 0px;
}

/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
table td:first-child,
table th:first-child {
    max-width: 400px;
    overflow: auto;
    white-space: nowrap;
}

.tab-buttons button {
    font-size: 20px;
}

#scale-logo {
    border-style: none !important;
    box-shadow: none;
    display: block;
    margin-left: auto;
    margin-right: auto;
    max-width: 600px;
}

#scale-logo .download {
    display: none;
}
#filter_type{
    border: 0;
    padding-left: 0;
    padding-top: 0;
}
#filter_type label {
    display: flex;
}
#filter_type label > span{
    margin-top: var(--spacing-lg);
    margin-right: 0.5em;
}
#filter_type label > .wrap{
    width: 103px;
}
#filter_type label > .wrap .wrap-inner{
    padding: 2px;
}
#filter_type label > .wrap .wrap-inner input{
    width: 1px
}
#filter-columns-type{
    border:0;
    padding:0.5;
}
#filter-columns-size{
    border:0;
    padding:0.5;
}
#box-filter > .form{
    border: 0
}
"""

get_window_url_params = """
    function(url_params) {
        const params = new URLSearchParams(window.location.search);
        url_params = Object.fromEntries(params);
        return url_params;
    }
    """
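Note that `app.py` imports `custom_css` but never passes it to Gradio, so as committed these rules have no effect, and `get_window_url_params` is never attached to an event. A minimal sketch of how the stylesheet would typically be wired in, assuming the Gradio 4.x `css` argument to `gr.Blocks`:

```python
import gradio as gr

from css_html_js import custom_css  # defined in this commit

# Hypothetical wiring: pass the stylesheet to Blocks so rules such as
# .markdown-text and the #citation-button selectors apply to the rendered page.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("AdvUnlearn demo", elem_classes="markdown-text")

demo.launch()
```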
    	
images/cheetah.jpg
ADDED
    	
requirements.txt
ADDED

APScheduler
black
click
datasets
gradio
gradio_client
huggingface-hub>=0.18.0
matplotlib
numpy
pandas
python-dateutil
requests
tqdm
transformers