# from huggingface_hub import InferenceClient  # alternative: query the model via the Inference API
import gradio as gr
from transformers import pipeline

# gr.load("models/grammarly/coedit-large").launch()  # alternative: load the hosted demo directly

# Load the model and tokenizer using the pipeline API.
# CoEdIT is a Flan-T5-based seq2seq model, so the task is "text2text-generation",
# not "text-generation" (which expects a causal LM).
model_pipeline = pipeline("text2text-generation", model="grammarly/coedit-large")
def generate_text(input_text, history, temperature=0.9, max_new_tokens=50, top_p=0.95, top_k=50):
    # Generate text using the model; do_sample=True is required for
    # temperature/top_p/top_k to have any effect.
    output = model_pipeline(input_text, do_sample=True, temperature=temperature,
                            max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k)
    # Extract and return the generated text (history is unused by the pipeline call).
    return output[0]["generated_text"]
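# Example call (a sketch; the instruction prefix follows the CoEdIT model card,
# and the sample output is illustrative, not a guaranteed result):
#   generate_text("Fix grammatical errors in this sentence: She go to school.", history=[])
#   -> e.g. "She goes to school."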
additional_inputs = [
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True,
              info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=150, minimum=0, maximum=250, step=64, interactive=True,
              info="The maximum number of new tokens"),
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1.0, step=0.05, interactive=True,
              info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Top-k", value=50, minimum=0, maximum=100, step=1, interactive=True,
              info="Limits the number of top-k tokens considered at each step"),
]
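# Note: gr.ChatInterface passes the slider values positionally after (message, history),
# so the slider order above must match the extra parameters of generate_text.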
gr.ChatInterface(
    fn=generate_text,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True,
                       likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="My Grammarly Space",
    concurrency_limit=20,
).launch(show_api=False)
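# To try the app locally (a sketch, assuming this file is saved as app.py):
#   pip install gradio transformers torch
#   python app.py   # Gradio serves on http://127.0.0.1:7860 by default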