import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Load the Granite instruct model once and cache it across Streamlit reruns
@st.cache_resource
def load_model():
    model_id = "ibm-granite/granite-3.3-2b-instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

generator = load_model()

# Streamlit UI
st.title("🧠 HealthAI Chatbot")
st.markdown("Ask me about your symptoms or health advice!")

user_input = st.text_input("💬 Enter your symptoms or question:", "")

if user_input:
    prompt = f"Answer as a health assistant: {user_input}"
    output = generator(prompt, max_new_tokens=150, do_sample=True)[0]["generated_text"]
    # The pipeline echoes the prompt at the start of its output, so strip it
    # before displaying the model's answer
    cleaned_output = output.replace(prompt, "").strip()
    st.success(cleaned_output)
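
# Possible refinement (a sketch, not part of the app above): Granite instruct
# models are trained on a chat format, so building the prompt with the
# tokenizer's chat template may follow instructions more reliably than the raw
# "Answer as a health assistant:" prefix. The helper name and the system-prompt
# wording here are illustrative assumptions.
def build_chat_prompt(tokenizer, question: str) -> str:
    messages = [
        {"role": "system", "content": "You are a careful health assistant. "
                                      "Recommend seeing a doctor for serious symptoms."},
        {"role": "user", "content": question},
    ]
    # apply_chat_template renders the message list into the model's expected
    # prompt format; add_generation_prompt=True appends the assistant-turn marker
    # so the model continues with its answer
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

# Usage (replacing the f-string prompt above):
#     prompt = build_chat_prompt(generator.tokenizer, user_input)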