MdKaif123 commited on
Commit
8dbaa1a
·
verified ·
1 Parent(s): 47007c6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -0
app.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
+ import torch
4
+
5
# Model loading
@st.cache_resource
def load_model():
    """Build and cache a text-generation pipeline for the Granite instruct model.

    The ``@st.cache_resource`` decorator makes Streamlit run this once per
    server process, so the (slow) weight download/initialisation is not
    repeated on every script rerun.
    """
    checkpoint = "ibm-granite/granite-3.3-2b-instruct"
    tok = AutoTokenizer.from_pretrained(checkpoint)
    lm = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32)
    return pipeline("text-generation", model=lm, tokenizer=tok)
12
+
13
generator = load_model()

# Streamlit UI
st.title("🧠 HealthAI Chatbot")
st.markdown("Ask me about your symptoms or health advice!")

user_input = st.text_input("💬 Enter your symptoms or question:", "")

if user_input:
    prompt = f"Answer as a health assistant: {user_input}"
    # return_full_text=False tells the text-generation pipeline to return only
    # the newly generated tokens. The previous approach — str.replace(prompt, "")
    # on the full output — silently mangled the answer whenever the model echoed
    # the prompt (or any substring of it) inside its reply.
    result = generator(prompt, max_new_tokens=150, do_sample=True, return_full_text=False)
    st.success(result[0]["generated_text"].strip())