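# Streamlit front end for an intent-classification chatbot: a saved Keras model
# classifies each user message into an intent tag, and a reply is chosen at
# random from the matching entry in therapy_data.json.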
import json
import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle

# Load the model, tokenizer, and label encoder
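# The three artifacts are assumed to come from the accompanying training step:
#   chatbot_model.h5     - Keras text-classification model trained on the intents
#   tokenizer.pickle     - Tokenizer fitted on the training phrases
#   label_encoder.pickle - LabelEncoder fitted on the intent tags (maps a
#                          predicted class index back to a tag name)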
model = load_model('chatbot_model.h5')

with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

with open('label_encoder.pickle', 'rb') as handle:
    label_encoder = pickle.load(handle)

# Function to predict intents based on user input
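# Example (the tag name is hypothetical; actual names depend on therapy_data.json):
#   predict_intent("hello there")  ->  "greeting"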
def predict_intent(user_input):
    # Tokenize and pad the input to the sequence length the model expects
    user_input_seq = tokenizer.texts_to_sequences([user_input])
    user_input_pad = pad_sequences(user_input_seq, maxlen=model.input_shape[1], padding='post')
    prediction = model.predict(user_input_pad)
    # Map the highest-probability class index back to its intent tag
    intent = label_encoder.inverse_transform([np.argmax(prediction)])
    return intent[0]

# Function to generate responses based on predicted intents
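# therapy_data.json is assumed to use the usual intents layout, for example:
#   {"intents": [{"tag": "greeting",
#                 "patterns": ["hi", "hello"],
#                 "responses": ["Hello, how are you feeling today?"]}]}
# Only "tag" and "responses" are read here; "patterns" would be used at training time.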
def generate_response(intent):
    # Re-read the intents file on each call (simple, and fine for a small file)
    with open('therapy_data.json', 'r') as f:
        data = json.load(f)
    responses = [i['responses'] for i in data['intents'] if i['tag'] == intent]
    if not responses:
        # Guard against a tag the label encoder knows but the JSON file does not
        return "I'm sorry, I don't have a response for that yet."
    return np.random.choice(responses[0])

# Streamlit UI
st.title("AI Therapy Chatbot")

user_input = st.text_input("You:", "")

if user_input:
    intent = predict_intent(user_input)
    response = generate_response(intent)
    st.write(f"Chatbot: {response}")
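# Run locally with (assuming this script is saved as app.py):
#   streamlit run app.py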