A mid-sized software company (500+ employees) was overwhelmed by 200+ daily IT support tickets, 60% of which addressed routine, repetitive issues. Long resolution times (4.2 hours on average) hurt employee productivity. The support team spent the majority of its time on simple problems, had limited after-hours coverage for remote workers, and delivered inconsistent solutions.
This project involved the development and deployment of a production-ready AI-powered chatbot designed to handle common IT support requests. Using neural networks, natural language processing, and context tracking, the chatbot can understand user inquiries, provide contextually relevant solutions, and guide users through troubleshooting steps, ultimately reducing IT support ticket volume by 25%.
The client, a mid-sized software company with 500+ employees, faced several challenges with their IT support system: high ticket volume (200+ per day, 60% routine), long resolution times averaging 4.2 hours, limited after-hours coverage for remote workers, and inconsistent solutions across the support team.
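The solution's knowledge base is a JSON intents file referenced throughout the code below. The client's actual file isn't reproduced in this write-up; the following hypothetical excerpt sketches the structure the code assumes (tags, example patterns, responses, and the optional context keys used later):

# Hypothetical excerpt mirroring the structure loaded into self.intents.
# Tags, patterns, and responses here are illustrative, not the client's data.
sample_intents = {
    "intents": [
        {
            "tag": "password_reset",
            "patterns": ["I forgot my password", "How do I reset my password?"],
            "responses": ["You can reset your password from the self-service portal."],
            "context_set": "password_flow"
        },
        {
            "tag": "password_reset_done",
            "patterns": ["That worked", "Password reset successful"],
            "responses": ["Great! Is there anything else I can help with?"],
            "context_filter": "password_flow",
            "context_reset": True
        }
    ]
}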
I developed a comprehensive AI chatbot solution with several key components:
Built a three-layer feed-forward neural network with TensorFlow and Keras to classify user intents:
# File-level imports shared by the code excerpts in this write-up
import pickle
import random

import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD

def build_model(self):
    # Build and train the intent-classification network
    train_x, train_y = self.create_training_data()

    # Three layers: 128 neurons, 64 neurons, then one output neuron per intent class
    model = Sequential()
    model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(train_y[0]), activation='softmax'))

    # Compile with SGD plus Nesterov momentum
    sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    # Fit the model
    hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
    self.model = model

    # Persist the model, vocabulary, and class labels for reuse at serve time
    model.save(self.model_filename)
    pickle.dump(self.words, open(self.words_filename, 'wb'))
    pickle.dump(self.classes, open(self.classes_filename, 'wb'))
    return hist
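build_model() relies on a create_training_data() helper that isn't shown above. As a hedged reconstruction, an assumption based on how self.words, self.classes, and self.documents are used elsewhere, it would turn each tagged pattern into a bag-of-words vector paired with a one-hot class label:

def create_training_data(self):
    # Sketch only: build one (bag-of-words, one-hot label) pair per tagged pattern
    training = []
    output_empty = [0] * len(self.classes)
    for tokens, tag in self.documents:
        # Normalize the pattern's tokens the same way the vocabulary was built
        pattern_words = [self.lemmatizer.lemmatize(w.lower()) for w in tokens]
        bag = [1 if w in pattern_words else 0 for w in self.words]
        # One-hot encode the intent tag
        output_row = list(output_empty)
        output_row[self.classes.index(tag)] = 1
        training.append((bag, output_row))
    random.shuffle(training)  # avoid ordering bias during training
    train_x = [bag for bag, _ in training]
    train_y = [label for _, label in training]
    return train_x, train_y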
Implemented a complete NLP pipeline using NLTK for language understanding:
def preprocess_data(self):
    # Extract all words and classes from the intents file
    for intent in self.intents['intents']:
        for pattern in intent['patterns']:
            # Tokenize each pattern
            w = word_tokenize(pattern)
            self.words.extend(w)
            # Add the (tokens, tag) pair to the corpus
            self.documents.append((w, intent['tag']))
            # Add the tag to our classes list
            if intent['tag'] not in self.classes:
                self.classes.append(intent['tag'])
    # Lemmatize and lowercase each word, drop ignored tokens and duplicates
    self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in self.ignore_words]
    self.words = sorted(set(self.words))
    self.classes = sorted(set(self.classes))

def clean_up_sentence(self, sentence):
    # Tokenize the sentence into an array of words
    sentence_words = word_tokenize(sentence)
    # Lemmatize each word down to its base form
    sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words

def bow(self, sentence, words):
    # Encode the sentence as a binary bag-of-words vector over the vocabulary
    sentence_words = self.clean_up_sentence(sentence)
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                # Mark 1 at this vocabulary position
                bag[i] = 1
    return np.array(bag)
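To make the encoding concrete, here is a toy illustration with a hypothetical four-word vocabulary (the real vocabulary is learned in preprocess_data):

# Toy vocabulary for illustration only
words = ['password', 'printer', 'reset', 'wifi']
# For the sentence "Please reset my password", clean_up_sentence yields
# ['please', 'reset', 'my', 'password'], so bow() returns:
#   np.array([1, 0, 1, 0])   # 'password' and 'reset' are in the vocabulary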
Developed per-user context tracking to maintain conversational state across interactions:
def predict_class(self, sentence, user_id):
    # Discard predictions below this confidence threshold
    ERROR_THRESHOLD = 0.25
    # Generate class probabilities from the model
    bow = self.bow(sentence, self.words)
    res = self.model.predict(np.array([bow]))[0]
    # Look up the user's current conversational context, if any
    current_context = self.user_context.get(user_id, None)
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
    # Sort by probability, highest first
    results.sort(key=lambda x: x[1], reverse=True)
    result_list = [{"intent": self.classes[r[0]], "probability": str(r[1])} for r in results]
    # If there's a current context, move intents whose context_filter matches
    # to the front; a stable sort preserves probability order within each group
    if current_context:
        in_context = {intent['tag'] for intent in self.intents['intents']
                      if intent.get('context_filter') == current_context}
        result_list.sort(key=lambda r: r["intent"] not in in_context)
    return result_list
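The classifier returns a ranked list of candidate intents rather than a single label, which lets the response layer apply its own confidence threshold. A typical result has this shape (intent names and probabilities are illustrative):

# Illustrative result for a message like "I forgot my password":
# [{'intent': 'password_reset', 'probability': '0.91'},
#  {'intent': 'account_locked', 'probability': '0.27'}]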
Implemented intelligent response selection based on intent, probability, and context:
def get_response(self, intent_list, user_id):
    # Pick a random response from the highest-probability intent
    if not intent_list:
        return "I don't understand. Could you please rephrase?"
    tag = intent_list[0]['intent']
    probability = float(intent_list[0]['probability'])
    # If the model is not confident enough, ask for clarification
    if probability < 0.6:
        return "I'm not quite sure what you're asking. Could you provide more details about your IT issue?"
    # Find the matching intent definition
    for intent in self.intents['intents']:
        if intent['tag'] == tag:
            # Store the new context if this intent sets one
            if 'context_set' in intent:
                self.user_context[user_id] = intent['context_set']
            # Clear the context if this intent requests a reset
            if 'context_reset' in intent and intent['context_reset']:
                if user_id in self.user_context:
                    del self.user_context[user_id]
            # Record this interaction in the conversation history
            if user_id not in self.conversation_history:
                self.conversation_history[user_id] = []
            self.conversation_history[user_id].append({
                "intent": tag,
                "context": self.user_context.get(user_id, None)
            })
            # Return a random response from the intent
            return random.choice(intent['responses'])
    return "Something went wrong. Please try again."
Created metrics analysis functionality to quantify the chatbot's effectiveness:
def analyze_ticket_metrics(self, tickets_before, tickets_after):
    """
    Analyze the effectiveness of the chatbot in reducing ticket volume.

    Parameters:
        tickets_before (int): Number of tickets before implementing the chatbot
        tickets_after (int): Number of tickets after implementing the chatbot

    Returns:
        dict: Statistics about ticket reduction
    """
    if tickets_before <= 0:
        raise ValueError("tickets_before must be a positive number")
    reduction = tickets_before - tickets_after
    reduction_percentage = (reduction / tickets_before) * 100
    return {
        "tickets_before": tickets_before,
        "tickets_after": tickets_after,
        "reduction": reduction,
        "reduction_percentage": reduction_percentage,
        "goal_met": reduction_percentage >= 25
    }
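For example, using the engagement's headline numbers (200 daily tickets before deployment, a 25% reduction target), where bot is a hypothetical ITSupportChatbot instance:

metrics = bot.analyze_ticket_metrics(tickets_before=200, tickets_after=150)
# -> {'tickets_before': 200, 'tickets_after': 150, 'reduction': 50,
#     'reduction_percentage': 25.0, 'goal_met': True}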
The chatbot was deployed as a production-ready solution with full API integration:
from flask import Flask, request, jsonify
import threading

app = Flask(__name__)
chatbot = ITSupportChatbot()

# Load the model in a separate thread so it doesn't block server startup
def load_model():
    try:
        chatbot.load_trained_model()
        print("Model loaded successfully")
    except Exception as e:
        print(f"Error loading model: {e}")
        print("Training new model...")
        chatbot.preprocess_data()
        chatbot.build_model()

# Start loading the model in the background
threading.Thread(target=load_model).start()

@app.route('/api/chat', methods=['POST'])
def chat_endpoint():
    data = request.json
    if not data or 'message' not in data:
        return jsonify({'error': 'No message provided'}), 400
    user_id = data.get('user_id', 'anonymous')
    message = data['message']
    response = chatbot.chat(message, user_id)
    return jsonify({
        'response': response,
        'context': chatbot.user_context.get(user_id, None),
        'history_length': len(chatbot.conversation_history.get(user_id, []))
    })

@app.route('/api/metrics', methods=['POST'])
def metrics_endpoint():
    data = request.json
    if not data or 'tickets_before' not in data or 'tickets_after' not in data:
        return jsonify({'error': 'Missing required data'}), 400
    metrics = chatbot.analyze_ticket_metrics(data['tickets_before'], data['tickets_after'])
    return jsonify(metrics)

if __name__ == '__main__':
    # Development entry point; debug mode should be disabled in production
    app.run(debug=True, host='0.0.0.0', port=5000)
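As a usage sketch, any HTTP client can exercise the chat endpoint; the user ID and message below are illustrative, and the example assumes the server above is running locally:

import requests

reply = requests.post(
    "http://localhost:5000/api/chat",
    json={"user_id": "employee-42", "message": "I can't connect to the office VPN"},
)
print(reply.json()["response"])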
Implemented direct integration with the company's ticket management system:
import requests
from requests.auth import HTTPBasicAuth

class TicketSystemIntegration:
    def __init__(self, api_url, username, password):
        self.api_url = api_url
        self.auth = HTTPBasicAuth(username, password)
        self.headers = {"Content-Type": "application/json"}

    def create_ticket(self, user_id, issue_description, category):
        # Create a ticket in the system when the chatbot can't resolve an issue
        data = {
            "user_id": user_id,
            "description": issue_description,
            "category": category,
            "source": "IT Support Chatbot"
        }
        response = requests.post(
            f"{self.api_url}/incidents",
            json=data,
            headers=self.headers,
            auth=self.auth
        )
        if response.status_code == 201:
            return {"success": True, "ticket_id": response.json().get("number")}
        else:
            return {"success": False, "error": response.text}

    def get_ticket_status(self, ticket_id):
        # Allow users to check on their ticket status
        response = requests.get(
            f"{self.api_url}/incidents/{ticket_id}",
            headers=self.headers,
            auth=self.auth
        )
        if response.status_code == 200:
            ticket_data = response.json()
            return {
                "success": True,
                "status": ticket_data.get("state"),
                # Guard against an unassigned ticket returning null here
                "assigned_to": (ticket_data.get("assigned_to") or {}).get("display_value"),
                "updated": ticket_data.get("sys_updated_on")
            }
        else:
            return {"success": False, "error": response.text}
The chatbot was developed and deployed using a structured, phased approach.
Several significant challenges were addressed during development.
Beyond the direct metrics, the chatbot delivered significant business value: after-hours coverage for remote workers, consistent solutions to common issues, and a support team freed from routine tickets to focus on complex problems.
The project laid the groundwork for several planned enhancements.
This project demonstrated expertise in neural network design with TensorFlow and Keras, natural language processing with NLTK, conversational context tracking, REST API development with Flask, and integration with enterprise ticketing systems.