update

- app.py +1 -1
- code/PPLUIE/config.py +5 -0
- code/PPLUIE/ppluie.py +0 -5
- code/example.py +4 -2
    	
app.py
CHANGED

@@ -11,7 +11,7 @@ with gr.Blocks() as demo:
     gr.Markdown(
         """
         # ParaPLUIE (Paraphrase Generation Evaluation Powered by an LLM)
-
+        ParaPLUIE is a metric for evaluating the semantic proximity of two sentences.
         ParaPLUIE uses the perplexity of an LLM to compute a confidence score.
         It has shown the highest correlation with human judgement on paraphrase classification while keeping the computational cost low, roughly equal to the cost of generating one token.
         """)
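The description added above only sketches the idea. As a rough, hypothetical illustration of how a next-token-probability confidence score can be derived (not ParaPLUIE's actual implementation, whose prompt templates live elsewhere in this repo), one could compare the probabilities the LLM assigns to single-token "Yes" and "No" answers after a paraphrase-judgment prompt. The prompt wording and the confidence_score helper below are assumptions; the model name is the small entry listed in config.py.

# Hypothetical sketch only: illustrates a one-forward-pass Yes/No confidence score,
# not ParaPLUIE's real templates or scoring code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "HuggingFaceTB/SmolLM2-135M-Instruct"  # small model listed in config.py
tok = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def confidence_score(sentence, hypothesis):
    # Illustrative prompt; ParaPLUIE uses its own templates (e.g. "FS-DIRECT").
    prompt = (f'Sentence 1: "{sentence}"\n'
              f'Sentence 2: "{hypothesis}"\n'
              "Are these two sentences paraphrases? Answer Yes or No: ")
    inputs = tok(prompt, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits[0, -1]  # next-token logits only
    probs = torch.softmax(logits, dim=-1)
    # Assumes "Yes" and "No" each fit on a single token (checked in the library
    # by chech_end_tokens_tmpl, per code/example.py).
    yes_id = tok.encode("Yes", add_special_tokens=False)[0]
    no_id = tok.encode("No", add_special_tokens=False)[0]
    return (probs[yes_id] / (probs[yes_id] + probs[no_id])).item()

print(confidence_score("Have you ever seen a tsunami ?",
                       "Have you ever seen a tiramisu ?"))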
    	
code/PPLUIE/config.py
CHANGED

@@ -1,3 +1,8 @@
+def show_available_models():
+    print("LLM tested with PPLUIE: ")
+    for k in (model_dict.keys()):
+        print(k)
+
 model_dict = {
     #small
     "HuggingFaceTB/SmolLM2-135M-Instruct" : {
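With this change, show_available_models() is a plain module-level helper in PPLUIE.config, so the list of tested LLMs can be printed without instantiating a scorer. A minimal usage sketch; the output is inferred from the function body and the single model_dict entry visible in this diff.

from PPLUIE.config import show_available_models

show_available_models()
# Prints something like:
#   LLM tested with PPLUIE:
#   HuggingFaceTB/SmolLM2-135M-Instruct
#   ... (remaining model_dict entries are not shown in this diff)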
    	
code/PPLUIE/ppluie.py
CHANGED

@@ -45,11 +45,6 @@ class ppluie:
     def show_templates(self):
         print_templates()

-    def show_available_models(self):
-        print("LLM tested with PPLUIE: ")
-        for k in (model_dict.keys()):
-            print(k)
-
     def setTemplate(self, template: str):
         check_template_name(template)
         self.template = template
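Because the method is removed from the ppluie class, code written against the old API needs a one-line migration; the replacement call is the one introduced in code/example.py in this same commit.

# Before this commit (method on the scorer instance):
# scorer.show_available_models()

# After this commit (module-level function):
from PPLUIE.config import show_available_models
show_available_models()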
    	
code/example.py
CHANGED

@@ -1,11 +1,15 @@
 # install this lib : pip install {PATH}/ParaPLUIE

 from PPLUIE import ppluie
+from PPLUIE.config import show_available_models

 template = "FS-DIRECT"

 device = "cuda:1"

+# to show LLM already tested with ParaPLUIE
+show_available_models()
+
 scorer = ppluie("mistralai/Mistral-7B-Instruct-v0.2", device)
 scorer.setTemplate(template)

@@ -14,8 +18,6 @@ scorer.show_templates()
 # to show how is the prompt encoded, to ensure that the correct numbers of special tokens are removed
 # and Yes / No words fit on one token
 scorer.chech_end_tokens_tmpl()
-# to show LLM already tested with ParaPLUIE
-scorer.show_available_models()

 S = "Have you ever seen a tsunami ?"
 H = "Have you ever seen a tiramisu ?"