from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html

tokenizer = AutoTokenizer.from_pretrained("/content/chatglm-6b-int4", trust_remote_code=True)
model = AutoModel.from_pretrained("/content/chatglm-6b-int4", trust_remote_code=True).half().cuda()
model = model.eval()
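
# Note: .half().cuda() above assumes an NVIDIA GPU is available. A hedged
# CPU-only fallback (much slower; same local checkpoint path) would be:
#
# model = AutoModel.from_pretrained(
#     "/content/chatglm-6b-int4", trust_remote_code=True
# ).float()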
| 9 | +"""Override Chatbot.postprocess""" |
| 10 | + |
| 11 | + |
| 12 | +def postprocess(self, y): |
| 13 | + if y is None: |
| 14 | + return [] |
| 15 | + for i, (message, response) in enumerate(y): |
| 16 | + y[i] = ( |
| 17 | + None if message is None else mdtex2html.convert((message)), |
| 18 | + None if response is None else mdtex2html.convert(response), |
| 19 | + ) |
| 20 | + return y |
| 21 | + |
| 22 | + |
| 23 | +gr.Chatbot.postprocess = postprocess |
| 24 | + |


def parse_text(text):
    """Copied from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            # Fence line: an odd count opens a code block, an even one closes it.
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = '<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a code block: escape Markdown/HTML special
                    # characters as HTML entities so they render literally.
                    line = line.replace("`", "\\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text
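
# Rough illustration of the intent (hypothetical input): a string such as
#   "text\n```python\nprint(1)\n```"
# comes out as '<pre><code class="language-python">...' with the fenced
# body HTML-escaped, so the browser shows the code verbatim instead of
# interpreting it as markup.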


def predict(input, chatbot, max_length, top_p, temperature, history):
    chatbot.append((parse_text(input), ""))
    for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
                                               temperature=temperature):
        chatbot[-1] = (parse_text(input), parse_text(response))

        # Yield inside the loop so Gradio streams partial responses.
        yield chatbot, history
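
# Because predict is a generator, the demo.queue() call at the bottom is
# required for these incremental updates to reach the browser. A hedged
# non-streaming variant (model.chat comes from the checkpoint's remote
# code; treat the exact signature as an assumption) might look like:
#
# def predict_once(input, history):
#     response, history = model.chat(tokenizer, input, history=history)
#     return response, history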


def reset_user_input():
    return gr.update(value='')


def reset_state():
    return [], []


with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">ChatGLM</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
                    container=False)
            with gr.Column(min_width=32, scale=1):
                submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            emptyBtn = gr.Button("Clear History")
            max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
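            # Top P and Temperature steer the sampling in stream_chat:
            # lower values make replies more deterministic, higher values
            # more varied.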

    # Per-session conversation history, passed to and returned by predict.
    history = gr.State([])

    submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history],
                    show_progress=True)
    submitBtn.click(reset_user_input, [], [user_input])

    emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)

demo.queue().launch(share=True, inbrowser=True)
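
# queue() enables the generator-based streaming above. share=True asks
# Gradio to open a temporary public *.gradio.live tunnel (useful on Colab,
# where localhost is unreachable); inbrowser=True tries to open a local
# browser tab, which has no effect in a hosted notebook.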