"""Gradio demo that generates RPG dungeon-room descriptions with a fine-tuned GPT-2."""
import gradio as gr
import transformers as tr

# Special tokens the model was fine-tuned with; prompts take the shape
# "<bos> <room name> <|body|>" and generation runs until <|EOS|>/max_length.
SPECIAL_TOKENS = {
    'eos_token': '<|EOS|>',
    'bos_token': '<|endoftext|>',
    'pad_token': '',
    'sep_token': '<|body|>',
}

MPATH = "./mdl_roomgen2"
MODEL = tr.GPT2LMHeadModel.from_pretrained(MPATH)

# ToDo: Will save tokenizer next time so can replace this with a load
TOK = tr.GPT2Tokenizer.from_pretrained("gpt2")
TOK.add_special_tokens(SPECIAL_TOKENS)


def generate_room(room_name, method, max_length):
    """Generate a dungeon-room description from the fine-tuned model.

    Args:
        room_name: Name of the room used as the prompt, e.g. "Dark Catacombs".
        method: Decoding strategy — one of "Greedy", "Beam", "Sample", "Top K".
        max_length: Maximum total token length of the generated sequence.
            Gradio sliders deliver floats, so the value is cast to int here.

    Returns:
        The generated room text with the prompt tokens stripped off.

    Raises:
        ValueError: If `method` is not one of the supported strategies.
    """
    # generate() expects an integer length; the Slider widget passes a float.
    max_length = int(max_length)

    prompt = " ".join(
        [SPECIAL_TOKENS["bos_token"], room_name, SPECIAL_TOKENS["sep_token"]]
    )
    ids = TOK.encode(prompt, return_tensors="pt")

    if method == "Greedy":
        output = MODEL.generate(ids, max_length=max_length)
    elif method == "Beam":
        output = MODEL.generate(
            ids,
            max_length=max_length,
            num_beams=5,
            no_repeat_ngram_size=4,
            early_stopping=True,
        )
    elif method == "Sample":
        # Pure sampling over the full vocabulary (top_k=0 disables truncation)
        output = MODEL.generate(
            ids, max_length=max_length, do_sample=True, top_k=0
        )
    elif method == "Top K":
        # Top-K sampling — redistribute probability over top words, see Fan et al. 2018
        output = MODEL.generate(
            ids, max_length=max_length, do_sample=True, top_k=50
        )
    else:
        raise ValueError(f"Unexpected generation method, received {method}")

    # Drop the prompt tokens, then collapse the double spaces decoding can
    # leave behind. (The original `.replace(" ", " ")` was a no-op — the
    # intended fix is double-space -> single-space.)
    output = TOK.decode(
        output[0][ids.shape[1]:], clean_up_tokenization_spaces=True
    ).replace("  ", " ")
    return output


if __name__ == "__main__":
    iface = gr.Interface(
        title="RPG Room Generator",
        fn=generate_room,
        inputs=[
            gr.inputs.Textbox(lines=1),
            gr.inputs.Radio(["Greedy", "Beam", "Sample", "Top K"], default="Top K"),
            gr.inputs.Slider(minimum=50, maximum=250, default=100),
        ],
        outputs="text",
        examples=[["Dark Catacombs", "Top K", 150]],
        layout="horizontal",
        allow_flagging=None,
        theme="dark",
    )
    app, local_url, share_url = iface.launch()