import os

# Hide CUDA devices at import time (presumably so nothing initializes CUDA
# before the ZeroGPU runtime hands out a GPU inside @spaces.GPU calls; the
# `spaces` runtime intercepts the .to("cuda") calls made at startup).
os.environ["CUDA_VISIBLE_DEVICES"] = ""

import spaces
import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the LoRA adapter together with its base model, plus the matching tokenizer.
model = AutoPeftModelForCausalLM.from_pretrained("eforse01/lora_model").to("cuda")
tokenizer = AutoTokenizer.from_pretrained("eforse01/lora_model")
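
# Optional (not used here): PEFT's merge_and_unload() can fold the LoRA weights
# into the base model for slightly faster inference, at the cost of dropping
# the separate adapter:
# model = model.merge_and_unload()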


@spaces.GPU(duration=120)  # Request a ZeroGPU slot for at most 120 s per call.
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    min_p,
):
    # Rebuild the conversation in chat-template form: the system prompt first,
    # then alternating user/assistant turns, ending with the new user message.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Apply the model's chat template and tokenize in one step. With
    # tokenize=True and return_tensors="pt" this returns a tensor of token ids.
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    )

    input_ids = inputs.to("cuda")
    print("Input IDs shape:", input_ids.shape)  # Debug: (1, prompt_length)

    output = model.generate(
        input_ids=input_ids,
        max_new_tokens=max_tokens,
        use_cache=True,
        do_sample=True,  # Sampling must be on for temperature/min_p to apply.
        temperature=temperature,
        min_p=min_p,
    )

    print("Generated Output Shape:", output.shape)  # Debug: (1, prompt + new tokens)
    print("Generated Output:", output)

    # Decode the full sequence (prompt + completion) and keep only the text
    # after the final "assistant" marker emitted by the chat template. This
    # string split is fragile if the conversation itself contains "assistant".
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    yield response.split("assistant")[-1]
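    # A sturdier alternative (a sketch, assuming the usual generate() contract
    # that the output sequence begins with the prompt tokens): decode only the
    # newly generated ids instead of splitting the decoded string.
    # new_tokens = output[0][input_ids.shape[-1]:]
    # yield tokenizer.decode(new_tokens, skip_special_tokens=True)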


# Gradio chat UI; the extra inputs map one-to-one onto respond()'s parameters
# after (message, history).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.5, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.99, step=0.01, label="Min-p"),
    ],
)


if __name__ == "__main__":
    demo.launch()