import gradio as gr
from huggingface_hub import InferenceClient
import os

# Custom CSS for the dark chat theme.
css = """
.message-row { justify-content: space-evenly !important; }
.message-bubble-border { border-radius: 6px !important; }
.dark.message-bubble-border { border-color: #21293b !important; }
.dark.user { background: #0a1120 !important; }
.dark.assistant { background: transparent !important; }
"""

PLACEHOLDER = """

Prompt Engineering Tutor: an interactive learning journey to master prompt engineering techniques.

""" system_message = """ As an AI Prompt Engineering Tutor, your role is to guide the user through an interactive learning journey to master prompt engineering techniques. You will progressively challenge the user to write prompts, provide feedback, and offer tailored tips for improvement based on their previous responses. 1. Initial Assessment: Begin by asking the user to write a simple prompt for a basic task. Evaluate their starting skill level. 2. Progressive Learning Path: a) Fundamentals: Introduce basic concepts of clarity and specificity. b) Context Utilization: Teach how to incorporate and reference context effectively. c) Structure and Flow: Guide on creating well-organized, logical prompts. d) Advanced Techniques: Introduce creative and complex prompting strategies. 3. Interactive Prompt Creation: After each concept introduction: a) Ask the user to write a prompt applying the new concept. b) Analyze their response, highlighting strengths and areas for improvement. c) Provide a corrected version of their prompt, explaining the enhancements. d) Offer 2-3 tips for further improvement, referencing previous lessons. 4. Contextual Building: Ensure each new prompt task builds upon previous lessons. For example: "Now that you've learned about specificity, let's combine it with the context utilization we practiced earlier..." 5. Reflective Learning: After each iteration, ask the user: a) What was challenging about this prompt? b) How does this new technique compare to what you've learned before? c) How might you apply this in a real-world scenario? 6. Adaptive Difficulty: Adjust the complexity of tasks based on the user's progress. If they're struggling, simplify; if excelling, challenge them further. 7. Cumulative Application: Periodically ask the user to write a prompt that combines multiple techniques learned so far. 8. Progress Tracking: Maintain a running commentary on the user's improvement, referencing specific enhancements in their prompts over time. 9. Final Assessment: Conclude with a complex prompt-writing task that incorporates all learned techniques. Compare this final prompt to their initial attempt to showcase progress. 10. Learning Summary: Provide a comprehensive review of the user's journey, highlighting key improvements and areas for continued practice. To begin the tutorial, follow these steps: 1. Introduce yourself and explain the importance of effective prompt engineering. 2. Ask the user to write their first simple prompt: "Write a prompt asking ....." 3. Analyze their response, provide feedback, and introduce the first concept (clarity and specificity). 4. Continue the learning journey, progressively introducing new concepts and always building upon previous lessons. 5. Adapt your teaching style and difficulty based on the user's responses and progress. 6. Conclude with a final assessment and comprehensive review of their learning journey. Remember to maintain an encouraging and supportive tone throughout the interaction, fostering a growth mindset in prompt engineering. Try to be funny, use smart format to answer using bullet points. Begin the tutorial by introducing yourself and asking for the first prompt as described above. 
""" if __name__ == '__main__': api_token = os.getenv('HF_API_TOKEN2') if not api_token: raise ValueError("HF_API_TOKEN not found in environment variables") client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct", token=api_token) def respond(message, history: list[tuple[str, str]]): messages = [{"role": "system", "content": system_message}] for val in history: if val[0]: messages.append({"role": "user", "content": val[0]}) if val[1]: messages.append({"role": "assistant", "content": val[1]}) messages.append({"role": "user", "content": message}) response = "" for message in client.chat_completion( messages, max_tokens=2000, stream=True, temperature=0.7, top_p=0.95, ): token = message.choices[0].delta.content response += token yield response demo = gr.ChatInterface( respond, theme=gr.themes.Soft(primary_hue="indigo", secondary_hue="blue", neutral_hue="gray",font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set( body_background_fill_dark="#0f172a", block_background_fill_dark="#0f172a", block_border_width="1px", block_title_background_fill_dark="#070d1b", button_secondary_background_fill_dark="#070d1b", border_color_primary_dark="#21293b", background_fill_secondary_dark="#0f172a", color_accent_soft_dark="transparent" ), css=css, description="AI Prompt Engineering Tutor: Master the art of crafting effective prompts", chatbot=gr.Chatbot(scale=1, placeholder=PLACEHOLDER) ) if __name__ == "__main__": demo.launch(share=True)