"""
|
|
AI Assistant Service for Chat Integration
|
|
Handles AI-powered assistance, incident suggestions, and knowledge base integration
|
|
"""
|
|
from django.utils import timezone
|
|
from django.contrib.auth import get_user_model
|
|
from typing import Dict, Any, Optional, List
|
|
import re
|
|
|
|
from ..models import ChatBot, WarRoomMessage
|
|
from knowledge_learning.models import KnowledgeBaseArticle, Postmortem, IncidentPattern
|
|
from incident_intelligence.models import Incident
|
|
|
|
User = get_user_model()
|
|
|
|
|
|

class AIAssistantService:
    """Service for AI-powered chat assistance"""

    @staticmethod
    def generate_response(bot: ChatBot, message: WarRoomMessage, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Generate an AI response to a chat message."""
        try:
            incident = message.war_room.incident
            user = message.sender

            # Analyze message content
            message_analysis = AIAssistantService._analyze_message(message.content)

            # Determine response type based on analysis
            if message_analysis['contains_question']:
                response = AIAssistantService._generate_question_response(
                    bot, message, message_analysis, context
                )
            elif message_analysis['contains_incident_keywords']:
                response = AIAssistantService._generate_incident_response(
                    bot, message, message_analysis, context
                )
            elif message_analysis['contains_help_request']:
                response = AIAssistantService._generate_help_response(
                    bot, message, message_analysis, context
                )
            else:
                response = AIAssistantService._generate_general_response(
                    bot, message, message_analysis, context
                )

            # Add confidence score and sources
            response['confidence'] = AIAssistantService._calculate_confidence(response)
            response['sources'] = AIAssistantService._get_response_sources(response)

            return response

        except Exception as e:
            logger.exception("AI assistant failed to generate a response")
            return {
                'response': f"I encountered an error while processing your message: {str(e)}",
                'confidence': 0.0,
                'sources': []
            }

    @staticmethod
    def suggest_similar_incidents(incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Suggest similar past incidents based on the current incident."""
        try:
            # Find similar incidents based on category and keywords
            similar_incidents = []

            # Search by category
            category_incidents = Incident.objects.filter(
                category=incident.category,
                status__in=['RESOLVED', 'CLOSED']
            ).exclude(id=incident.id)[:limit]

            for similar_incident in category_incidents:
                similarity_score = AIAssistantService._calculate_incident_similarity(
                    incident, similar_incident
                )

                if similarity_score > 0.3:  # Minimum similarity threshold
                    similar_incidents.append({
                        'id': str(similar_incident.id),
                        'title': similar_incident.title,
                        'severity': similar_incident.severity,
                        'status': similar_incident.status,
                        'created_at': similar_incident.created_at.isoformat(),
                        'resolved_at': similar_incident.resolved_at.isoformat() if similar_incident.resolved_at else None,
                        'resolution_time': str(similar_incident.resolution_time) if similar_incident.resolution_time else None,
                        'similarity_score': similarity_score,
                        'has_postmortem': similar_incident.postmortems.exists()
                    })

            # Sort by similarity score
            similar_incidents.sort(key=lambda x: x['similarity_score'], reverse=True)

            return similar_incidents[:limit]

        except Exception:
            logger.exception("Failed to suggest similar incidents")
            return []

    @staticmethod
    def suggest_knowledge_articles(incident: Incident, message_content: str = None) -> List[Dict[str, Any]]:
        """Suggest relevant knowledge base articles."""
        try:
            # Extract keywords from the incident and message
            keywords = AIAssistantService._extract_keywords(incident, message_content)

            # Search knowledge base articles
            articles = KnowledgeBaseArticle.objects.filter(
                is_active=True,
                tags__overlap=keywords
            ).distinct()[:10]

            suggested_articles = []
            for article in articles:
                relevance_score = AIAssistantService._calculate_article_relevance(
                    article, incident, keywords
                )

                if relevance_score > 0.2:  # Minimum relevance threshold
                    suggested_articles.append({
                        'id': str(article.id),
                        'title': article.title,
                        'summary': article.summary,
                        'category': article.category,
                        'tags': article.tags,
                        'relevance_score': relevance_score,
                        'url': f"/knowledge/articles/{article.id}/"
                    })

            # Sort by relevance score
            suggested_articles.sort(key=lambda x: x['relevance_score'], reverse=True)

            return suggested_articles[:5]

        except Exception:
            logger.exception("Failed to suggest knowledge base articles")
            return []

    @staticmethod
    def suggest_runbooks(incident: Incident) -> List[Dict[str, Any]]:
        """Suggest relevant runbooks for the incident."""
        try:
            from automation_orchestration.models import Runbook

            # Find runbooks that match the incident's characteristics
            runbooks = Runbook.objects.filter(
                is_active=True,
                categories__contains=[incident.category]
            )

            suggested_runbooks = []
            for runbook in runbooks:
                if incident.severity in runbook.severity_levels:
                    suggested_runbooks.append({
                        'id': str(runbook.id),
                        'name': runbook.name,
                        'description': runbook.description,
                        'category': runbook.category,
                        'estimated_duration': runbook.estimated_duration,
                        'success_rate': runbook.success_rate,
                        'last_used': runbook.last_used.isoformat() if runbook.last_used else None
                    })

            return suggested_runbooks[:5]

        except Exception:
            logger.exception("Failed to suggest runbooks")
            return []

    @staticmethod
    def detect_incident_patterns(incident: Incident) -> List[Dict[str, Any]]:
        """Detect known patterns that this incident matches."""
        try:
            # Find matching patterns
            patterns = IncidentPattern.objects.filter(
                is_active=True,
                incidents=incident
            )

            detected_patterns = []
            for pattern in patterns:
                detected_patterns.append({
                    'id': str(pattern.id),
                    'name': pattern.name,
                    'pattern_type': pattern.pattern_type,
                    'description': pattern.description,
                    'confidence_score': pattern.confidence_score,
                    'frequency': pattern.frequency,
                    'last_occurrence': pattern.last_occurrence.isoformat() if pattern.last_occurrence else None,
                    'next_predicted_occurrence': pattern.next_predicted_occurrence.isoformat() if pattern.next_predicted_occurrence else None
                })

            return detected_patterns

        except Exception:
            logger.exception("Failed to detect incident patterns")
            return []

    @staticmethod
    def _analyze_message(content: str) -> Dict[str, Any]:
        """Analyze message content to determine intent."""
        content_lower = content.lower()

        # Check for questions (word boundaries avoid false positives such as
        # "show" matching "how", and catch question words anywhere in the text)
        question_patterns = [
            r'\?', r'\bhow\b', r'\bwhat\b', r'\bwhen\b', r'\bwhere\b', r'\bwhy\b', r'\bwho\b',
            r'\bcan\s+you\b', r'\bcould\s+you\b', r'\bwould\s+you\b'
        ]
        contains_question = any(re.search(pattern, content_lower) for pattern in question_patterns)

        # Check for incident-related keywords
        incident_keywords = [
            'incident', 'issue', 'problem', 'outage', 'error', 'failure', 'down',
            'severity', 'priority', 'escalate', 'resolve', 'fix'
        ]
        contains_incident_keywords = any(keyword in content_lower for keyword in incident_keywords)

        # Check for help requests (word boundaries so "help" at the end of a
        # sentence still matches)
        help_patterns = [
            r'\bhelp\b', r'\bassist\b', r'\bsupport\b', r'\bguidance\b', r'\badvice\b'
        ]
        contains_help_request = any(re.search(pattern, content_lower) for pattern in help_patterns)

        return {
            'contains_question': contains_question,
            'contains_incident_keywords': contains_incident_keywords,
            'contains_help_request': contains_help_request,
            'word_count': len(content.split()),
            'sentiment': AIAssistantService._analyze_sentiment(content)
        }

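    # Illustrative example (not executed; assumes the keyword lists and patterns
    # above): for a message such as "Can you help? The payment service is down",
    # _analyze_message reports contains_question=True, contains_incident_keywords=True
    # (via "down"), contains_help_request=True, word_count=8, sentiment='neutral'.
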
    @staticmethod
    def _generate_question_response(bot: ChatBot, message: WarRoomMessage, analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a response to a question."""
        incident = message.war_room.incident

        # Get relevant knowledge articles
        articles = AIAssistantService.suggest_knowledge_articles(incident, message.content)

        if articles:
            response_text = "Based on your question, here are some relevant resources:\n\n"
            for article in articles[:3]:
                response_text += f"• **{article['title']}** - {article['summary'][:100]}...\n"

            response_text += "\nYou can find more information in our knowledge base."
        else:
            response_text = "I'd be happy to help with your question. Let me search our knowledge base for relevant information."

        return {
            'response': response_text,
            'response_type': 'question_answer',
            'suggested_articles': articles[:3]
        }

    @staticmethod
    def _generate_incident_response(bot: ChatBot, message: WarRoomMessage, analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a response related to incident management."""
        incident = message.war_room.incident

        # Suggest similar incidents
        similar_incidents = AIAssistantService.suggest_similar_incidents(incident)

        # Suggest runbooks
        runbooks = AIAssistantService.suggest_runbooks(incident)

        response_text = "I can help with incident management. Here's what I found:\n\n"

        if similar_incidents:
            response_text += "**Similar Past Incidents:**\n"
            for incident_data in similar_incidents[:2]:
                response_text += f"• {incident_data['title']} (Similarity: {incident_data['similarity_score']:.1%})\n"
            response_text += "\n"

        if runbooks:
            response_text += "**Suggested Runbooks:**\n"
            for runbook in runbooks[:2]:
                response_text += f"• {runbook['name']} - {runbook['description'][:100]}...\n"

        return {
            'response': response_text,
            'response_type': 'incident_assistance',
            'similar_incidents': similar_incidents[:3],
            'suggested_runbooks': runbooks[:3]
        }

    @staticmethod
    def _generate_help_response(bot: ChatBot, message: WarRoomMessage, analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a help response."""
        response_text = (
            "I'm here to help! I can assist you with:\n\n"
            "• **Incident Management** - Find similar incidents, suggest runbooks\n"
            "• **Knowledge Base** - Search for relevant articles and documentation\n"
            "• **ChatOps Commands** - Execute automation commands like `/status`, `/run playbook <name>`\n"
            "• **Pattern Detection** - Identify recurring issues and patterns\n\n"
            "Just ask me a question or mention what you need help with!"
        )

        return {
            'response': response_text,
            'response_type': 'help'
        }

    @staticmethod
    def _generate_general_response(bot: ChatBot, message: WarRoomMessage, analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a general response."""
        incident = message.war_room.incident

        # Check for patterns
        patterns = AIAssistantService.detect_incident_patterns(incident)

        if patterns:
            response_text = "I noticed this incident matches some known patterns:\n\n"
            for pattern in patterns[:2]:
                response_text += f"• **{pattern['name']}** - {pattern['description'][:100]}...\n"
        else:
            response_text = "I'm monitoring this incident. Let me know if you need any assistance with incident response or have questions about similar past incidents."

        return {
            'response': response_text,
            'response_type': 'general',
            'detected_patterns': patterns
        }

    @staticmethod
    def _calculate_confidence(response: Dict[str, Any]) -> float:
        """Calculate a confidence score for the response."""
        base_confidence = 0.7

        # Increase confidence based on available supporting data
        if response.get('suggested_articles'):
            base_confidence += 0.1
        if response.get('similar_incidents'):
            base_confidence += 0.1
        if response.get('suggested_runbooks'):
            base_confidence += 0.1

        return min(base_confidence, 1.0)

    @staticmethod
    def _get_response_sources(response: Dict[str, Any]) -> List[str]:
        """Get the sources that informed the response."""
        sources = []

        if response.get('suggested_articles'):
            sources.append('Knowledge Base')
        if response.get('similar_incidents'):
            sources.append('Historical Incidents')
        if response.get('suggested_runbooks'):
            sources.append('Runbook Library')
        if response.get('detected_patterns'):
            sources.append('Pattern Analysis')

        return sources

    @staticmethod
    def _calculate_incident_similarity(incident1: Incident, incident2: Incident) -> float:
        """Calculate a similarity score between two incidents."""
        similarity = 0.0

        # Category similarity
        if incident1.category == incident2.category:
            similarity += 0.3

        # Severity similarity
        if incident1.severity == incident2.severity:
            similarity += 0.2

        # Text similarity (Jaccard overlap of title words)
        text1_words = set(incident1.title.lower().split())
        text2_words = set(incident2.title.lower().split())

        if text1_words and text2_words:
            text_similarity = len(text1_words.intersection(text2_words)) / len(text1_words.union(text2_words))
            similarity += text_similarity * 0.5

        return similarity

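    # Worked example (illustrative): two incidents in the same category (+0.3)
    # with different severities (+0.0) whose titles share 2 of 5 unique words
    # (Jaccard 0.4) score 0.3 + 0.4 * 0.5 = 0.5, which clears the 0.3
    # suggestion threshold used in suggest_similar_incidents().
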
    @staticmethod
    def _extract_keywords(incident: Incident, message_content: str = None) -> List[str]:
        """Extract keywords from the incident and message."""
        keywords = []

        # Add incident keywords
        if incident.category:
            keywords.append(incident.category.lower())
        if incident.title:
            keywords.extend(incident.title.lower().split())

        # Add message keywords
        if message_content:
            keywords.extend(message_content.lower().split())

        # Remove common stop words and very short tokens
        stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
        keywords = [word for word in keywords if word not in stop_words and len(word) > 2]

        return list(set(keywords))  # Remove duplicates

    @staticmethod
    def _calculate_article_relevance(article: KnowledgeBaseArticle, incident: Incident, keywords: List[str]) -> float:
        """Calculate a relevance score for a knowledge article."""
        relevance = 0.0

        # Tag overlap
        if article.tags:
            tag_overlap = len(set(article.tags).intersection(set(keywords)))
            relevance += tag_overlap * 0.1

        # Category match
        if article.category == incident.category:
            relevance += 0.3

        # Title keyword match
        title_words = set(article.title.lower().split())
        keyword_overlap = len(title_words.intersection(set(keywords)))
        if title_words:
            relevance += (keyword_overlap / len(title_words)) * 0.4

        return min(relevance, 1.0)

    @staticmethod
    def _analyze_sentiment(content: str) -> str:
        """Simple keyword-based sentiment analysis."""
        positive_words = ['good', 'great', 'excellent', 'fixed', 'resolved', 'working', 'success']
        negative_words = ['bad', 'terrible', 'broken', 'failed', 'error', 'issue', 'problem']

        content_lower = content.lower()
        positive_count = sum(1 for word in positive_words if word in content_lower)
        negative_count = sum(1 for word in negative_words if word in content_lower)

        if positive_count > negative_count:
            return 'positive'
        elif negative_count > positive_count:
            return 'negative'
        else:
            return 'neutral'
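

# Example usage (a minimal sketch, kept as a comment; assumes Django is
# configured and that a ChatBot, a WarRoomMessage, and its related Incident
# already exist -- the specific lookups and the 'created_at' field below are
# illustrative assumptions, not part of this module):
#
#   bot = ChatBot.objects.first()
#   message = WarRoomMessage.objects.latest('created_at')
#   result = AIAssistantService.generate_response(bot, message, context={})
#   print(result['response'], result['confidence'], result['sources'])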