Updates
This commit is contained in:
433
ETB-API/collaboration_war_rooms/services/ai_assistant.py
Normal file
433
ETB-API/collaboration_war_rooms/services/ai_assistant.py
Normal file
@@ -0,0 +1,433 @@
|
||||
"""
|
||||
AI Assistant Service for Chat Integration
|
||||
Handles AI-powered assistance, incident suggestions, and knowledge base integration
|
||||
"""
|
||||
from django.utils import timezone
|
||||
from django.contrib.auth import get_user_model
|
||||
from typing import Dict, Any, Optional, List
|
||||
import re
|
||||
|
||||
from ..models import ChatBot, WarRoomMessage
|
||||
from knowledge_learning.models import KnowledgeBaseArticle, Postmortem, IncidentPattern
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
User = get_user_model()
|
||||
|
||||
|
||||
class AIAssistantService:
    """Service for AI-powered chat assistance.

    All methods are stateless ``@staticmethod``s. Database lookups are
    best-effort: any unexpected failure degrades to an empty result (or an
    apologetic chat response) rather than raising into the chat pipeline.
    """

    @staticmethod
    def generate_response(bot: "ChatBot", message: "WarRoomMessage", context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Generate an AI response to a chat message.

        Analyzes the message content, dispatches to an intent-specific
        generator (question / incident / help / general), then annotates the
        result with a confidence score and the data sources used.

        Args:
            bot: The chat bot responding (currently unused by the generators
                beyond being passed through).
            message: The war-room message being responded to.
            context: Optional extra context forwarded to the generators.

        Returns:
            Dict with at least ``response``, ``confidence`` and ``sources``.
        """
        try:
            # Classify the message first; the flags drive the dispatch below.
            message_analysis = AIAssistantService._analyze_message(message.content)

            # Dispatch priority: question > incident keywords > help request > general.
            if message_analysis['contains_question']:
                response = AIAssistantService._generate_question_response(
                    bot, message, message_analysis, context
                )
            elif message_analysis['contains_incident_keywords']:
                response = AIAssistantService._generate_incident_response(
                    bot, message, message_analysis, context
                )
            elif message_analysis['contains_help_request']:
                response = AIAssistantService._generate_help_response(
                    bot, message, message_analysis, context
                )
            else:
                response = AIAssistantService._generate_general_response(
                    bot, message, message_analysis, context
                )

            # Attach confidence score and sources derived from the payload.
            response['confidence'] = AIAssistantService._calculate_confidence(response)
            response['sources'] = AIAssistantService._get_response_sources(response)

            return response

        except Exception as e:
            # Never break the chat flow: surface the error text with zero confidence.
            return {
                'response': f"I encountered an error while processing your message: {str(e)}",
                'confidence': 0.0,
                'sources': []
            }

    @staticmethod
    def suggest_similar_incidents(incident: "Incident", limit: int = 5) -> List[Dict[str, Any]]:
        """Suggest similar past incidents based on the current incident.

        Candidates are resolved/closed incidents in the same category; each is
        scored by :meth:`_calculate_incident_similarity` and only candidates
        above a 0.3 threshold are returned, best match first.
        """
        try:
            similar_incidents = []

            # Candidate pool: same category, already resolved or closed.
            category_incidents = Incident.objects.filter(
                category=incident.category,
                status__in=['RESOLVED', 'CLOSED']
            ).exclude(id=incident.id)[:limit]

            for similar_incident in category_incidents:
                similarity_score = AIAssistantService._calculate_incident_similarity(
                    incident, similar_incident
                )

                if similarity_score > 0.3:  # Minimum similarity threshold
                    similar_incidents.append({
                        'id': str(similar_incident.id),
                        'title': similar_incident.title,
                        'severity': similar_incident.severity,
                        'status': similar_incident.status,
                        'created_at': similar_incident.created_at.isoformat(),
                        'resolved_at': similar_incident.resolved_at.isoformat() if similar_incident.resolved_at else None,
                        'resolution_time': str(similar_incident.resolution_time) if similar_incident.resolution_time else None,
                        'similarity_score': similarity_score,
                        'has_postmortem': similar_incident.postmortems.exists()
                    })

            # Best matches first.
            similar_incidents.sort(key=lambda x: x['similarity_score'], reverse=True)

            return similar_incidents[:limit]

        except Exception:
            # Best-effort: suggestions are optional, never fatal.
            return []

    @staticmethod
    def suggest_knowledge_articles(incident: "Incident", message_content: str = None) -> List[Dict[str, Any]]:
        """Suggest relevant knowledge base articles.

        Keywords are extracted from the incident (and optional message text),
        matched against article tags, then re-scored for relevance; only
        articles above a 0.2 threshold are returned, top five, best first.
        """
        try:
            # Extract keywords from incident and message.
            keywords = AIAssistantService._extract_keywords(incident, message_content)

            # Tag-overlap search over active articles (capped before rescoring).
            articles = KnowledgeBaseArticle.objects.filter(
                is_active=True,
                tags__overlap=keywords
            ).distinct()[:10]

            suggested_articles = []
            for article in articles:
                relevance_score = AIAssistantService._calculate_article_relevance(
                    article, incident, keywords
                )

                if relevance_score > 0.2:  # Minimum relevance threshold
                    suggested_articles.append({
                        'id': str(article.id),
                        'title': article.title,
                        'summary': article.summary,
                        'category': article.category,
                        'tags': article.tags,
                        'relevance_score': relevance_score,
                        'url': f"/knowledge/articles/{article.id}/"
                    })

            # Most relevant first.
            suggested_articles.sort(key=lambda x: x['relevance_score'], reverse=True)

            return suggested_articles[:5]

        except Exception:
            # Best-effort: suggestions are optional, never fatal.
            return []

    @staticmethod
    def suggest_runbooks(incident: "Incident") -> List[Dict[str, Any]]:
        """Suggest relevant runbooks for the incident.

        Matches active runbooks whose ``categories`` contain the incident's
        category and whose ``severity_levels`` include its severity.
        """
        try:
            # Imported lazily to avoid a hard import-time dependency.
            from automation_orchestration.models import Runbook

            # Find runbooks that match incident characteristics.
            runbooks = Runbook.objects.filter(
                is_active=True,
                categories__contains=[incident.category]
            )

            suggested_runbooks = []
            for runbook in runbooks:
                if incident.severity in runbook.severity_levels:
                    suggested_runbooks.append({
                        'id': str(runbook.id),
                        'name': runbook.name,
                        'description': runbook.description,
                        # NOTE(review): filter uses the plural `categories` field but
                        # this reads singular `category` — confirm both exist on Runbook.
                        'category': runbook.category,
                        'estimated_duration': runbook.estimated_duration,
                        'success_rate': runbook.success_rate,
                        'last_used': runbook.last_used.isoformat() if runbook.last_used else None
                    })

            return suggested_runbooks[:5]

        except Exception:
            # Best-effort: suggestions are optional, never fatal.
            return []

    @staticmethod
    def detect_incident_patterns(incident: "Incident") -> List[Dict[str, Any]]:
        """Detect known patterns already linked to the incident.

        Returns serialized active ``IncidentPattern`` rows that reference this
        incident; empty list on any failure.
        """
        try:
            # Only active patterns that already reference this incident.
            patterns = IncidentPattern.objects.filter(
                is_active=True,
                incidents=incident
            )

            detected_patterns = []
            for pattern in patterns:
                detected_patterns.append({
                    'id': str(pattern.id),
                    'name': pattern.name,
                    'pattern_type': pattern.pattern_type,
                    'description': pattern.description,
                    'confidence_score': pattern.confidence_score,
                    'frequency': pattern.frequency,
                    'last_occurrence': pattern.last_occurrence.isoformat() if pattern.last_occurrence else None,
                    'next_predicted_occurrence': pattern.next_predicted_occurrence.isoformat() if pattern.next_predicted_occurrence else None
                })

            return detected_patterns

        except Exception:
            # Best-effort: pattern detection is optional, never fatal.
            return []

    @staticmethod
    def _analyze_message(content: str) -> Dict[str, Any]:
        """Analyze message content to determine intent.

        Returns boolean intent flags plus word count and a coarse sentiment.
        Matching is case-insensitive; keyword checks are plain substring tests.
        """
        content_lower = content.lower()

        # Question detection: explicit '?' or interrogative lead-ins.
        question_patterns = [
            r'\?', r'how\s+', r'what\s+', r'when\s+', r'where\s+', r'why\s+', r'who\s+',
            r'can\s+you\s+', r'could\s+you\s+', r'would\s+you\s+'
        ]
        contains_question = any(re.search(pattern, content_lower) for pattern in question_patterns)

        # Incident-management vocabulary (substring match, so 'fix' also hits 'fixed').
        incident_keywords = [
            'incident', 'issue', 'problem', 'outage', 'error', 'failure', 'down',
            'severity', 'priority', 'escalate', 'resolve', 'fix'
        ]
        contains_incident_keywords = any(keyword in content_lower for keyword in incident_keywords)

        # Help-request phrasing (note: patterns require trailing whitespace).
        help_patterns = [
            r'help\s+', r'assist\s+', r'support\s+', r'guidance\s+', r'advice\s+'
        ]
        contains_help_request = any(re.search(pattern, content_lower) for pattern in help_patterns)

        return {
            'contains_question': contains_question,
            'contains_incident_keywords': contains_incident_keywords,
            'contains_help_request': contains_help_request,
            'word_count': len(content.split()),
            'sentiment': AIAssistantService._analyze_sentiment(content)
        }

    @staticmethod
    def _generate_question_response(bot: "ChatBot", message: "WarRoomMessage", analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a response to a question by surfacing knowledge articles."""
        incident = message.war_room.incident

        # Get relevant knowledge articles for the incident + question text.
        articles = AIAssistantService.suggest_knowledge_articles(incident, message.content)

        if articles:
            response_text = "Based on your question, here are some relevant resources:\n\n"
            for article in articles[:3]:
                response_text += f"• **{article['title']}** - {article['summary'][:100]}...\n"

            response_text += "\nYou can find more information in our knowledge base."
        else:
            response_text = "I'd be happy to help with your question. Let me search our knowledge base for relevant information."

        return {
            'response': response_text,
            'response_type': 'question_answer',
            'suggested_articles': articles[:3]
        }

    @staticmethod
    def _generate_incident_response(bot: "ChatBot", message: "WarRoomMessage", analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a response for incident-management chatter.

        Bundles similar past incidents and applicable runbooks into a single
        formatted message.
        """
        incident = message.war_room.incident

        # Gather supporting data.
        similar_incidents = AIAssistantService.suggest_similar_incidents(incident)
        runbooks = AIAssistantService.suggest_runbooks(incident)

        response_text = "I can help with incident management. Here's what I found:\n\n"

        if similar_incidents:
            response_text += "**Similar Past Incidents:**\n"
            for incident_data in similar_incidents[:2]:
                response_text += f"• {incident_data['title']} (Similarity: {incident_data['similarity_score']:.1%})\n"
            response_text += "\n"

        if runbooks:
            response_text += "**Suggested Runbooks:**\n"
            for runbook in runbooks[:2]:
                response_text += f"• {runbook['name']} - {runbook['description'][:100]}...\n"

        return {
            'response': response_text,
            'response_type': 'incident_assistance',
            'similar_incidents': similar_incidents[:3],
            'suggested_runbooks': runbooks[:3]
        }

    @staticmethod
    def _generate_help_response(bot: "ChatBot", message: "WarRoomMessage", analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a static capabilities/help response."""
        response_text = (
            "I'm here to help! I can assist you with:\n\n"
            "• **Incident Management** - Find similar incidents, suggest runbooks\n"
            "• **Knowledge Base** - Search for relevant articles and documentation\n"
            "• **ChatOps Commands** - Execute automation commands like `/status`, `/run playbook <name>`\n"
            "• **Pattern Detection** - Identify recurring issues and patterns\n\n"
            "Just ask me a question or mention what you need help with!"
        )

        return {
            'response': response_text,
            'response_type': 'help'
        }

    @staticmethod
    def _generate_general_response(bot: "ChatBot", message: "WarRoomMessage", analysis: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """Generate a fallback response, mentioning any detected patterns."""
        incident = message.war_room.incident

        # Check whether the incident matches any known patterns.
        patterns = AIAssistantService.detect_incident_patterns(incident)

        if patterns:
            response_text = "I noticed this incident matches some known patterns:\n\n"
            for pattern in patterns[:2]:
                response_text += f"• **{pattern['name']}** - {pattern['description'][:100]}...\n"
        else:
            response_text = "I'm monitoring this incident. Let me know if you need any assistance with incident response or have questions about similar past incidents."

        return {
            'response': response_text,
            'response_type': 'general',
            'detected_patterns': patterns
        }

    @staticmethod
    def _calculate_confidence(response: Dict[str, Any]) -> float:
        """Calculate a confidence score for the response.

        Starts at 0.7 and adds 0.1 per supporting payload present, capped at 1.0.
        """
        base_confidence = 0.7

        # Each kind of supporting evidence bumps confidence.
        if response.get('suggested_articles'):
            base_confidence += 0.1
        if response.get('similar_incidents'):
            base_confidence += 0.1
        if response.get('suggested_runbooks'):
            base_confidence += 0.1

        return min(base_confidence, 1.0)

    @staticmethod
    def _get_response_sources(response: Dict[str, Any]) -> List[str]:
        """Return human-readable source labels for the response payload."""
        sources = []

        if response.get('suggested_articles'):
            sources.append('Knowledge Base')
        if response.get('similar_incidents'):
            sources.append('Historical Incidents')
        if response.get('suggested_runbooks'):
            sources.append('Runbook Library')
        if response.get('detected_patterns'):
            sources.append('Pattern Analysis')

        return sources

    @staticmethod
    def _calculate_incident_similarity(incident1: "Incident", incident2: "Incident") -> float:
        """Calculate a similarity score in [0, 1] between two incidents.

        Weights: category match 0.3, severity match 0.2, Jaccard overlap of
        title words 0.5.
        """
        similarity = 0.0

        # Category similarity.
        if incident1.category == incident2.category:
            similarity += 0.3

        # Severity similarity.
        if incident1.severity == incident2.severity:
            similarity += 0.2

        # Text similarity: Jaccard index over lower-cased title words.
        text1_words = set(incident1.title.lower().split())
        text2_words = set(incident2.title.lower().split())

        if text1_words and text2_words:
            text_similarity = len(text1_words.intersection(text2_words)) / len(text1_words.union(text2_words))
            similarity += text_similarity * 0.5

        return similarity

    @staticmethod
    def _extract_keywords(incident: "Incident", message_content: str = None) -> List[str]:
        """Extract deduplicated keywords from the incident and optional message.

        Lower-cases everything, splits on whitespace, and drops stop words and
        tokens of length <= 2. Order is not guaranteed (set-based dedup).
        """
        keywords = []

        # Incident-derived keywords.
        if incident.category:
            keywords.append(incident.category.lower())
        if incident.title:
            keywords.extend(incident.title.lower().split())

        # Message-derived keywords.
        if message_content:
            keywords.extend(message_content.lower().split())

        # Drop common words and very short tokens.
        stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
        keywords = [word for word in keywords if word not in stop_words and len(word) > 2]

        return list(set(keywords))  # Remove duplicates

    @staticmethod
    def _calculate_article_relevance(article: "KnowledgeBaseArticle", incident: "Incident", keywords: List[str]) -> float:
        """Calculate a relevance score in [0, 1] for a knowledge article.

        Weights: 0.1 per overlapping tag, 0.3 for category match, up to 0.4
        for keyword coverage of the article title.
        """
        relevance = 0.0

        # Tag overlap (0.1 per shared tag).
        if article.tags:
            tag_overlap = len(set(article.tags).intersection(set(keywords)))
            relevance += tag_overlap * 0.1

        # Category match.
        if article.category == incident.category:
            relevance += 0.3

        # Title keyword coverage.
        title_words = set(article.title.lower().split())
        keyword_overlap = len(title_words.intersection(set(keywords)))
        if title_words:
            relevance += (keyword_overlap / len(title_words)) * 0.4

        return min(relevance, 1.0)

    @staticmethod
    def _analyze_sentiment(content: str) -> str:
        """Very simple lexicon-based sentiment: 'positive', 'negative' or 'neutral'.

        Counts substring hits from two small word lists; ties are 'neutral'.
        """
        positive_words = ['good', 'great', 'excellent', 'fixed', 'resolved', 'working', 'success']
        negative_words = ['bad', 'terrible', 'broken', 'failed', 'error', 'issue', 'problem']

        content_lower = content.lower()
        positive_count = sum(1 for word in positive_words if word in content_lower)
        negative_count = sum(1 for word in negative_words if word in content_lower)

        if positive_count > negative_count:
            return 'positive'
        elif negative_count > positive_count:
            return 'negative'
        else:
            return 'neutral'
|
||||
366
ETB-API/collaboration_war_rooms/services/automation_commands.py
Normal file
366
ETB-API/collaboration_war_rooms/services/automation_commands.py
Normal file
@@ -0,0 +1,366 @@
|
||||
"""
|
||||
Automation Commands Service for ChatOps Integration
|
||||
Handles execution of automation commands via chat interface
|
||||
"""
|
||||
from django.utils import timezone
|
||||
from django.contrib.auth import get_user_model
|
||||
from typing import Dict, Any, Optional, List
|
||||
|
||||
from ..models import ChatCommand, WarRoomMessage
|
||||
from automation_orchestration.models import Runbook, RunbookExecution, AutoRemediation, AutoRemediationExecution
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
User = get_user_model()
|
||||
|
||||
|
||||
class AutomationCommandService:
    """Service for handling automation commands in chat.

    Bridges ChatOps commands (e.g. ``/run playbook <name>``) to runbook and
    auto-remediation executions, and posts status messages back into the
    originating war room. All methods are stateless ``@staticmethod``s and
    return error dicts (or empty lists) instead of raising into the chat flow.
    """

    @staticmethod
    def execute_runbook_command(chat_command: "ChatCommand", runbook_name: str, user: "User") -> Dict[str, Any]:
        """Execute a runbook via chat command.

        Looks up an active runbook by (case-insensitive, partial) name, checks
        applicability to the command's incident, creates a
        ``RunbookExecution``, links it back to the chat command, and posts a
        status message in the war room.

        Returns:
            On success: dict with ``success``, ``execution_id``, ``runbook_name``,
            ``status``, ``message``. On failure: dict with ``error`` (plus
            name suggestions or incident info where relevant).
        """
        try:
            incident = chat_command.message.war_room.incident

            # Resolve the runbook by partial, case-insensitive name match.
            runbook = Runbook.objects.filter(
                name__icontains=runbook_name,
                is_active=True
            ).first()

            if not runbook:
                return {
                    'error': f'Runbook "{runbook_name}" not found or inactive',
                    'suggestions': AutomationCommandService._get_runbook_suggestions(runbook_name)
                }

            # Guard: runbook must match the incident's category/severity.
            if not AutomationCommandService._runbook_applies_to_incident(runbook, incident):
                return {
                    'error': f'Runbook "{runbook.name}" does not apply to this incident type',
                    'incident_category': incident.category,
                    'incident_severity': incident.severity
                }

            # Record the execution, tagging its chat origin for traceability.
            execution = RunbookExecution.objects.create(
                runbook=runbook,
                incident=incident,
                triggered_by=user,
                trigger_type='CHAT_COMMAND',
                trigger_data={
                    'chat_command_id': str(chat_command.id),
                    'command_text': chat_command.command_text
                }
            )

            # Link the execution back onto the originating chat command.
            chat_command.automation_execution = execution
            chat_command.save()

            # Announce the kickoff in the war room.
            AutomationCommandService._create_execution_status_message(
                chat_command.message.war_room,
                f"🚀 **Runbook Execution Started**\n\n"
                f"**Runbook:** {runbook.name}\n"
                f"**Execution ID:** {execution.id}\n"
                f"**Triggered by:** {user.username}\n"
                f"**Status:** {execution.status}\n\n"
                f"Monitor progress in the automation dashboard."
            )

            return {
                'success': True,
                'execution_id': str(execution.id),
                'runbook_name': runbook.name,
                'status': execution.status,
                'message': f'Runbook "{runbook.name}" execution started successfully'
            }

        except Exception as e:
            return {
                'error': f'Failed to execute runbook: {str(e)}'
            }

    @staticmethod
    def execute_auto_remediation_command(chat_command: "ChatCommand", remediation_name: str, user: "User") -> Dict[str, Any]:
        """Execute an auto-remediation via chat command.

        Mirrors :meth:`execute_runbook_command` for ``AutoRemediation``:
        name lookup, applicability check, execution record, and a war-room
        status message.
        """
        try:
            incident = chat_command.message.war_room.incident

            # Resolve the remediation by partial, case-insensitive name match.
            remediation = AutoRemediation.objects.filter(
                name__icontains=remediation_name,
                is_active=True
            ).first()

            if not remediation:
                return {
                    'error': f'Auto-remediation "{remediation_name}" not found or inactive',
                    'suggestions': AutomationCommandService._get_remediation_suggestions(remediation_name)
                }

            # Guard: remediation must match the incident's category/severity.
            if not AutomationCommandService._remediation_applies_to_incident(remediation, incident):
                return {
                    'error': f'Auto-remediation "{remediation.name}" does not apply to this incident type',
                    'incident_category': incident.category,
                    'incident_severity': incident.severity
                }

            # Record the execution, tagging its chat origin for traceability.
            execution = AutoRemediationExecution.objects.create(
                auto_remediation=remediation,
                incident=incident,
                triggered_by=user,
                trigger_type='CHAT_COMMAND',
                trigger_data={
                    'chat_command_id': str(chat_command.id),
                    'command_text': chat_command.command_text
                }
            )

            # Announce the kickoff in the war room.
            AutomationCommandService._create_execution_status_message(
                chat_command.message.war_room,
                f"🔧 **Auto-Remediation Started**\n\n"
                f"**Remediation:** {remediation.name}\n"
                f"**Execution ID:** {execution.id}\n"
                f"**Triggered by:** {user.username}\n"
                f"**Status:** {execution.status}\n\n"
                f"Monitor progress in the automation dashboard."
            )

            return {
                'success': True,
                'execution_id': str(execution.id),
                'remediation_name': remediation.name,
                'status': execution.status,
                'message': f'Auto-remediation "{remediation.name}" execution started successfully'
            }

        except Exception as e:
            return {
                'error': f'Failed to execute auto-remediation: {str(e)}'
            }

    @staticmethod
    def get_incident_status(chat_command: "ChatCommand") -> Dict[str, Any]:
        """Get a comprehensive incident status snapshot for a ``/status`` command.

        Combines core incident fields, SLA status, and the five most recent
        runbook executions into a single serializable dict.
        """
        try:
            incident = chat_command.message.war_room.incident

            # SLA status (imported lazily to avoid a circular import).
            from .sla_notifications import SLANotificationService
            sla_status = SLANotificationService.get_sla_status_for_incident(incident)

            # Most recent runbook executions for this incident.
            recent_executions = RunbookExecution.objects.filter(
                incident=incident
            ).order_by('-created_at')[:5]

            executions_data = []
            for execution in recent_executions:
                executions_data.append({
                    'id': str(execution.id),
                    'runbook_name': execution.runbook.name,
                    'status': execution.status,
                    'started_at': execution.started_at.isoformat() if execution.started_at else None,
                    'completed_at': execution.completed_at.isoformat() if execution.completed_at else None
                })

            return {
                'incident_id': str(incident.id),
                'title': incident.title,
                'status': incident.status,
                'severity': incident.severity,
                'priority': incident.priority,
                'category': incident.category,
                'assigned_to': incident.assigned_to.username if incident.assigned_to else None,
                'reporter': incident.reporter.username if incident.reporter else None,
                'created_at': incident.created_at.isoformat(),
                'updated_at': incident.updated_at.isoformat(),
                'resolution_time': str(incident.resolution_time) if incident.resolution_time else None,
                'sla_status': sla_status,
                'recent_executions': executions_data,
                'automation_enabled': incident.automation_enabled,
                'runbook_suggested': incident.runbook_suggested,
                'auto_remediation_attempted': incident.auto_remediation_attempted
            }

        except Exception as e:
            return {
                'error': f'Failed to get incident status: {str(e)}'
            }

    @staticmethod
    def list_available_runbooks(incident: "Incident") -> List[Dict[str, Any]]:
        """List active runbooks applicable to the incident (empty on failure)."""
        try:
            runbooks = Runbook.objects.filter(is_active=True)
            available_runbooks = []

            for runbook in runbooks:
                if AutomationCommandService._runbook_applies_to_incident(runbook, incident):
                    available_runbooks.append({
                        'id': str(runbook.id),
                        'name': runbook.name,
                        'description': runbook.description,
                        'category': runbook.category,
                        'severity_levels': runbook.severity_levels,
                        'estimated_duration': runbook.estimated_duration
                    })

            return available_runbooks

        except Exception:
            # Best-effort: listings are optional, never fatal.
            return []

    @staticmethod
    def list_available_remediations(incident: "Incident") -> List[Dict[str, Any]]:
        """List active auto-remediations applicable to the incident (empty on failure)."""
        try:
            remediations = AutoRemediation.objects.filter(is_active=True)
            available_remediations = []

            for remediation in remediations:
                if AutomationCommandService._remediation_applies_to_incident(remediation, incident):
                    available_remediations.append({
                        'id': str(remediation.id),
                        'name': remediation.name,
                        'description': remediation.description,
                        'category': remediation.category,
                        'severity_levels': remediation.severity_levels,
                        'estimated_duration': remediation.estimated_duration
                    })

            return available_remediations

        except Exception:
            # Best-effort: listings are optional, never fatal.
            return []

    @staticmethod
    def _runbook_applies_to_incident(runbook: "Runbook", incident: "Incident") -> bool:
        """Check whether a runbook applies to the incident.

        Empty ``categories`` / ``severity_levels`` act as wildcards; an
        inactive runbook never applies.
        """
        # Category restriction (empty list = any category).
        if runbook.categories and incident.category not in runbook.categories:
            return False

        # Severity restriction (empty list = any severity).
        if runbook.severity_levels and incident.severity not in runbook.severity_levels:
            return False

        # Inactive runbooks never apply (defense-in-depth; callers pre-filter).
        if not runbook.is_active:
            return False

        return True

    @staticmethod
    def _remediation_applies_to_incident(remediation: "AutoRemediation", incident: "Incident") -> bool:
        """Check whether an auto-remediation applies to the incident.

        Same wildcard semantics as :meth:`_runbook_applies_to_incident`.
        """
        # Category restriction (empty list = any category).
        if remediation.categories and incident.category not in remediation.categories:
            return False

        # Severity restriction (empty list = any severity).
        if remediation.severity_levels and incident.severity not in remediation.severity_levels:
            return False

        # Inactive remediations never apply.
        if not remediation.is_active:
            return False

        return True

    @staticmethod
    def _get_runbook_suggestions(partial_name: str) -> List[str]:
        """Suggest up to five active runbook names matching ``partial_name``."""
        try:
            runbooks = Runbook.objects.filter(
                name__icontains=partial_name,
                is_active=True
            ).values_list('name', flat=True)[:5]

            return list(runbooks)
        except Exception:
            # Narrowed from a bare `except:` so ^C / SystemExit still propagate.
            return []

    @staticmethod
    def _get_remediation_suggestions(partial_name: str) -> List[str]:
        """Suggest up to five active auto-remediation names matching ``partial_name``."""
        try:
            remediations = AutoRemediation.objects.filter(
                name__icontains=partial_name,
                is_active=True
            ).values_list('name', flat=True)[:5]

            return list(remediations)
        except Exception:
            # Narrowed from a bare `except:` so ^C / SystemExit still propagate.
            return []

    @staticmethod
    def _create_execution_status_message(war_room: 'WarRoom', content: str):
        """Create a SYSTEM status message in the war room (best-effort)."""
        try:
            WarRoomMessage.objects.create(
                war_room=war_room,
                content=content,
                message_type='SYSTEM',
                sender=None,  # System-originated: no human sender.
                sender_name='Automation System',
                external_data={
                    'message_type': 'automation_status'
                }
            )
        except Exception as e:
            print(f"Error creating status message: {e}")

    @staticmethod
    def update_execution_status(execution_id: str, status: str, result: Dict[str, Any] = None):
        """Update execution status on the originating chat command and notify the room.

        Returns True when a matching chat command was found and updated,
        False otherwise (including on error).
        """
        try:
            # Find the chat command that triggered this execution.
            chat_command = ChatCommand.objects.filter(
                automation_execution_id=execution_id
            ).first()

            if not chat_command:
                return False

            # Persist the new status (and result payload, if provided).
            chat_command.execution_status = status
            if result:
                chat_command.execution_result = result
            chat_command.save()

            # Pick an emoji for the status; 📊 is the generic fallback.
            status_emoji = {
                'SUCCESS': '✅',
                'FAILED': '❌',
                'RUNNING': '🔄',
                'CANCELLED': '⏹️'
            }.get(status, '📊')

            message_content = (
                f"{status_emoji} **Execution Status Update**\n\n"
                f"**Status:** {status}\n"
                f"**Execution ID:** {execution_id}\n"
            )

            if result:
                if 'error' in result:
                    message_content += f"**Error:** {result['error']}\n"
                if 'output' in result:
                    message_content += f"**Output:** {result['output'][:200]}...\n"

            AutomationCommandService._create_execution_status_message(
                chat_command.message.war_room,
                message_content
            )

            return True

        except Exception as e:
            print(f"Error updating execution status: {e}")
            return False
|
||||
@@ -0,0 +1,351 @@
|
||||
"""
|
||||
Compliance Integration Service for Chat
|
||||
Handles file classification, audit trails, and compliance requirements
|
||||
"""
|
||||
from django.utils import timezone
|
||||
from django.contrib.auth import get_user_model
|
||||
from typing import Dict, Any, Optional, List
|
||||
import hashlib
|
||||
import mimetypes
|
||||
|
||||
from ..models import ChatFile, WarRoomMessage
|
||||
from compliance_governance.models import DataClassification, AuditLog, CompliancePolicy
|
||||
from security.models import DataClassification as SecurityDataClassification
|
||||
|
||||
User = get_user_model()
|
||||
|
||||
|
||||
class ComplianceIntegrationService:
    """Service for handling compliance requirements in chat.

    Provides file classification, audit logging, compliance-policy checks
    and compliance-oriented export of war-room chat logs.

    NOTE(review): methods below use ``timezone`` (``django.utils.timezone``);
    the import is not visible in this chunk — confirm it exists at the top
    of the module.
    """

    # Classification levels whose files must be stored encrypted.
    _ENCRYPTED_LEVELS = {'CONFIDENTIAL', 'RESTRICTED', 'TOP_SECRET'}

    @staticmethod
    def classify_file(file_path: str, filename: str, file_size: int, user: User) -> Dict[str, Any]:
        """Classify a file based on content and context.

        Args:
            file_path: On-disk path, used to hash the file contents.
            filename: Original filename; drives extension-based rules.
            file_size: Size in bytes.
            user: Uploading user (currently unused; kept for API stability).

        Returns:
            Dict with ``classification_level``, ``data_classification_id``,
            ``file_hash``, ``mime_type``, ``is_encrypted`` and
            ``retention_period``. On failure an ``error`` key is added and
            the file falls back to the PUBLIC level.
        """
        try:
            # Best-effort MIME detection from the filename alone.
            mime_type, _ = mimetypes.guess_type(filename)

            # SHA-256 of the on-disk contents ("" if the file is unreadable).
            file_hash = ComplianceIntegrationService._calculate_file_hash(file_path)

            # Extension/size driven classification rules.
            classification_level = ComplianceIntegrationService._determine_classification(
                filename, mime_type, file_size
            )

            # Resolve (or lazily create) the classification record.
            data_classification = ComplianceIntegrationService._get_or_create_classification(
                classification_level
            )

            return {
                'classification_level': classification_level,
                'data_classification_id': str(data_classification.id) if data_classification else None,
                'file_hash': file_hash,
                'mime_type': mime_type,
                'is_encrypted': classification_level in ComplianceIntegrationService._ENCRYPTED_LEVELS,
                'retention_period': ComplianceIntegrationService._get_retention_period(classification_level)
            }

        except Exception as e:
            # Fail open to PUBLIC so uploads are not blocked by a
            # classification error; the error is surfaced to the caller.
            # The payload shape mirrors the success path so consumers can
            # read the same keys either way.
            return {
                'error': f'Failed to classify file: {str(e)}',
                'classification_level': 'PUBLIC',  # Default to public on error
                'data_classification_id': None,
                'file_hash': None,
                'mime_type': None,
                'is_encrypted': False,
                'retention_period': ComplianceIntegrationService._get_retention_period('PUBLIC')
            }

    @staticmethod
    def create_audit_log_entry(
        action: str,
        user: User,
        resource_type: str,
        resource_id: str,
        details: Dict[str, Any] = None
    ) -> bool:
        """Create an audit log entry for compliance.

        Args:
            action: Machine-readable action name (e.g. ``file_access``).
            user: Acting user.
            resource_type: Kind of resource touched (e.g. ``chat_message``).
            resource_id: Identifier of the touched resource.
            details: Optional extra context stored with the entry.

        Returns:
            True if the entry was written, False on any failure (best-effort:
            audit failures must not break the calling operation).
        """
        try:
            AuditLog.objects.create(
                action=action,
                user=user,
                resource_type=resource_type,
                resource_id=resource_id,
                timestamp=timezone.now(),
                details=details or {},
                # IP / user-agent are placeholders until request plumbing exists.
                ip_address=ComplianceIntegrationService._get_user_ip(user),
                user_agent=ComplianceIntegrationService._get_user_agent(user)
            )
            return True

        except Exception as e:
            print(f"Error creating audit log entry: {e}")
            return False

    @staticmethod
    def log_chat_message_access(message: WarRoomMessage, user: User, action: str = 'access'):
        """Log access to chat messages for the audit trail.

        Best-effort: errors are printed and swallowed so message delivery
        is never blocked by audit logging.
        """
        try:
            ComplianceIntegrationService.create_audit_log_entry(
                action=f'chat_message_{action}',
                user=user,
                resource_type='chat_message',
                resource_id=str(message.id),
                details={
                    'war_room_id': str(message.war_room.id),
                    'incident_id': str(message.war_room.incident.id),
                    'message_type': message.message_type,
                    'sender': message.sender.username if message.sender else None,
                    'content_length': len(message.content),
                    'is_encrypted': message.is_encrypted
                }
            )

        except Exception as e:
            print(f"Error logging chat message access: {e}")

    @staticmethod
    def log_file_access(file_obj: ChatFile, user: User, action: str = 'access'):
        """Log file access for the audit trail.

        Best-effort: errors are printed and swallowed so file operations
        are never blocked by audit logging.
        """
        try:
            ComplianceIntegrationService.create_audit_log_entry(
                action=f'file_{action}',
                user=user,
                resource_type='chat_file',
                resource_id=str(file_obj.id),
                details={
                    'filename': file_obj.original_filename,
                    'file_type': file_obj.file_type,
                    'file_size': file_obj.file_size,
                    'data_classification': file_obj.data_classification.level if file_obj.data_classification else None,
                    'is_encrypted': file_obj.is_encrypted,
                    'message_id': str(file_obj.message.id),
                    'incident_id': str(file_obj.message.war_room.incident.id)
                }
            )

        except Exception as e:
            print(f"Error logging file access: {e}")

    @staticmethod
    def check_compliance_policies(incident_id: str, user: User) -> Dict[str, Any]:
        """Check compliance policies for incident chat access.

        Returns:
            Dict with ``policies_checked``, ``violations``, ``warnings`` and
            ``recommendations`` (the latter two are reserved — currently
            never populated), or a dict with an ``error`` key on failure.
        """
        try:
            # Only active policies scoped to incidents are considered.
            policies = CompliancePolicy.objects.filter(
                is_active=True,
                applies_to_incidents=True
            )

            # len() evaluates and caches the queryset; the loop below reuses it.
            compliance_status = {
                'policies_checked': len(policies),
                'violations': [],
                'warnings': [],
                'recommendations': []
            }

            for policy in policies:
                # Skip policies that do not apply to this incident.
                if ComplianceIntegrationService._policy_applies_to_incident(policy, incident_id):
                    violations = ComplianceIntegrationService._check_policy_violations(
                        policy, incident_id, user
                    )

                    if violations:
                        compliance_status['violations'].extend(violations)

            return compliance_status

        except Exception as e:
            return {
                'error': f'Failed to check compliance policies: {str(e)}'
            }

    @staticmethod
    def export_chat_logs_for_compliance(
        incident_id: str,
        start_date: timezone.datetime,
        end_date: timezone.datetime,
        user: User
    ) -> Dict[str, Any]:
        """Export chat logs for compliance reporting.

        Collects all war-room messages (and their attachments) for the
        incident within ``[start_date, end_date]`` and records the export
        itself in the audit log.

        Returns:
            A serializable export dict, or a dict with an ``error`` key when
            no war room exists or the export fails.
        """
        try:
            from ..models import WarRoom

            # Resolve the incident's war room; without one there is nothing
            # to export.
            war_room = WarRoom.objects.filter(incident_id=incident_id).first()
            if not war_room:
                return {'error': 'War room not found for incident'}

            # Messages within the (inclusive) date range, oldest first.
            messages = WarRoomMessage.objects.filter(
                war_room=war_room,
                created_at__gte=start_date,
                created_at__lte=end_date
            ).order_by('created_at')

            export_data = {
                'incident_id': incident_id,
                'war_room_id': str(war_room.id),
                'export_date': timezone.now().isoformat(),
                'exported_by': user.username,
                'date_range': {
                    'start': start_date.isoformat(),
                    'end': end_date.isoformat()
                },
                'message_count': messages.count(),
                'messages': []
            }

            for message in messages:
                message_data = {
                    'id': str(message.id),
                    'timestamp': message.created_at.isoformat(),
                    # Fall back to the free-text sender name for system/bot
                    # messages that have no user account attached.
                    'sender': message.sender.username if message.sender else message.sender_name,
                    'message_type': message.message_type,
                    'content': message.content,
                    'is_encrypted': message.is_encrypted,
                    'is_pinned': message.is_pinned,
                    'attachments': [
                        {
                            'id': str(attachment.id),
                            'filename': attachment.original_filename,
                            'file_type': attachment.file_type,
                            'file_size': attachment.file_size,
                            'data_classification': attachment.data_classification.level if attachment.data_classification else None,
                            'is_encrypted': attachment.is_encrypted,
                            'file_hash': attachment.file_hash
                        }
                        for attachment in message.chat_files.all()
                    ]
                }
                export_data['messages'].append(message_data)

            # The export itself is an auditable event.
            ComplianceIntegrationService.create_audit_log_entry(
                action='export_chat_logs',
                user=user,
                resource_type='incident',
                resource_id=incident_id,
                details={
                    'message_count': export_data['message_count'],
                    'date_range': export_data['date_range']
                }
            )

            return export_data

        except Exception as e:
            return {
                'error': f'Failed to export chat logs: {str(e)}'
            }

    @staticmethod
    def _calculate_file_hash(file_path: str) -> str:
        """Calculate the SHA-256 hash of a file.

        Reads in 4 KiB chunks to keep memory flat for large files.

        Returns:
            Hex digest, or "" if the file cannot be read.
        """
        try:
            hash_sha256 = hashlib.sha256()
            with open(file_path, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_sha256.update(chunk)
            return hash_sha256.hexdigest()
        except OSError:
            # Unreadable/missing file: classification proceeds without a hash.
            return ""

    @staticmethod
    def _determine_classification(filename: str, mime_type: str, file_size: int) -> str:
        """Determine the data classification level for a file.

        Rules are ordered from most to least specific; the first match wins.
        ``mime_type`` is currently unused but kept for future content-based
        rules.
        """
        # Key material and certificates are always confidential.
        sensitive_extensions = ['.key', '.pem', '.p12', '.pfx', '.crt', '.cer']
        if any(filename.lower().endswith(ext) for ext in sensitive_extensions):
            return 'CONFIDENTIAL'

        # Log files (by extension or name) may contain internal details.
        if filename.lower().endswith('.log') or 'log' in filename.lower():
            return 'INTERNAL'

        # Configuration files often embed internal endpoints/settings.
        config_extensions = ['.conf', '.config', '.ini', '.yaml', '.yml', '.json']
        if any(filename.lower().endswith(ext) for ext in config_extensions):
            return 'INTERNAL'

        # Database files/dumps can contain bulk sensitive records.
        db_extensions = ['.db', '.sqlite', '.sql', '.dump']
        if any(filename.lower().endswith(ext) for ext in db_extensions):
            return 'CONFIDENTIAL'

        # Very large files are treated conservatively as internal.
        if file_size > 100 * 1024 * 1024:  # 100MB
            return 'INTERNAL'

        # Nothing matched: safe default.
        return 'PUBLIC'

    @staticmethod
    def _get_or_create_classification(level: str):
        """Get or create the data classification record for ``level``.

        Returns:
            A ``SecurityDataClassification`` instance, or None on failure
            (callers treat a missing classification as "unclassified").
        """
        try:
            # Reuse an existing record from the security module when present.
            classification = SecurityDataClassification.objects.filter(level=level).first()
            if classification:
                return classification

            # First use of this level: create it with the default retention.
            classification = SecurityDataClassification.objects.create(
                level=level,
                description=f'Data classification level: {level}',
                retention_period_days=ComplianceIntegrationService._get_retention_period(level)
            )
            return classification

        except Exception as e:
            print(f"Error getting/creating classification: {e}")
            return None

    @staticmethod
    def _get_retention_period(classification_level: str) -> int:
        """Return the retention period in days for a classification level.

        Unknown levels fall back to one year.
        """
        retention_periods = {
            'PUBLIC': 365,         # 1 year
            'INTERNAL': 1095,      # 3 years
            'CONFIDENTIAL': 2555,  # 7 years
            'RESTRICTED': 3650,    # 10 years
            'TOP_SECRET': 3650     # 10 years
        }
        return retention_periods.get(classification_level, 365)

    @staticmethod
    def _get_user_ip(user: User) -> str:
        """Get the user's IP address (placeholder implementation)."""
        # This would be implemented based on your request handling.
        return "127.0.0.1"

    @staticmethod
    def _get_user_agent(user: User) -> str:
        """Get the user's user agent (placeholder implementation)."""
        # This would be implemented based on your request handling.
        return "Chat System"

    @staticmethod
    def _policy_applies_to_incident(policy: CompliancePolicy, incident_id: str) -> bool:
        """Check whether a compliance policy applies to an incident.

        Placeholder: real policy-condition matching is not implemented yet,
        so every active policy is considered applicable.
        """
        return policy.is_active

    @staticmethod
    def _check_policy_violations(policy: CompliancePolicy, incident_id: str, user: User) -> List[str]:
        """Check for violations of ``policy`` on this incident.

        Placeholder: the named policy checks are stubs and always pass, so
        an empty list is returned.
        """
        violations = []

        # Example policy checks
        if policy.name == "Data Retention Policy":
            # Check if chat logs are being retained properly
            pass

        if policy.name == "Access Control Policy":
            # Check if user has appropriate access
            pass

        return violations
|
||||
287
ETB-API/collaboration_war_rooms/services/sla_notifications.py
Normal file
287
ETB-API/collaboration_war_rooms/services/sla_notifications.py
Normal file
@@ -0,0 +1,287 @@
|
||||
"""
|
||||
SLA Notifications Service for Chat Integration
|
||||
Handles SLA threshold notifications and escalation alerts in chat rooms
|
||||
"""
|
||||
from django.utils import timezone
|
||||
from django.db.models import Q
|
||||
from typing import Dict, Any, Optional, List
|
||||
|
||||
from ..models import WarRoom, WarRoomMessage
|
||||
from sla_oncall.models import SLAInstance, EscalationInstance, EscalationPolicy
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
|
||||
class SLANotificationService:
    """Service for handling SLA-related notifications in chat rooms.

    Posts system-authored (``sender=None``) messages into an incident's war
    room for SLA warnings, breaches, escalations, on-call handoffs and SLA
    successes, and provides an SLA status summary plus a periodic threshold
    sweep. All send methods are best-effort: failures are printed and
    reported via the boolean return value, never raised.
    """

    @staticmethod
    def _get_war_room(incident) -> Optional[WarRoom]:
        """Return the war room attached to ``incident``, or None if absent."""
        return WarRoom.objects.filter(incident=incident).first()

    @staticmethod
    def _post_system_message(
        war_room: WarRoom,
        content: str,
        message_type: str,
        sender_name: str,
        external_data: Dict[str, Any]
    ) -> None:
        """Create a system-authored chat message in ``war_room``.

        ``sender`` is None by design: these messages come from automated
        monitors, not users; ``sender_name`` labels the source instead.
        """
        WarRoomMessage.objects.create(
            war_room=war_room,
            content=content,
            message_type=message_type,
            sender=None,
            sender_name=sender_name,
            external_data=external_data
        )

    @staticmethod
    def send_sla_warning_notification(sla_instance: SLAInstance, threshold_percent: float = 80.0):
        """Send an SLA warning notification to the incident chat room.

        Returns:
            True if the warning was posted, False if no war room exists or
            posting failed.
        """
        try:
            war_room = SLANotificationService._get_war_room(sla_instance.incident)
            if not war_room:
                return False

            # Remaining time, rounded down to whole minutes for display.
            time_remaining = sla_instance.time_remaining
            time_remaining_minutes = int(time_remaining.total_seconds() / 60)

            message_content = (
                f"🚨 **SLA Warning** 🚨\n\n"
                f"**SLA:** {sla_instance.sla_definition.name}\n"
                f"**Type:** {sla_instance.sla_definition.get_sla_type_display()}\n"
                f"**Time Remaining:** {time_remaining_minutes} minutes\n"
                f"**Threshold:** {threshold_percent}% reached\n\n"
                f"Please take immediate action to meet the SLA target."
            )

            SLANotificationService._post_system_message(
                war_room,
                message_content,
                'ALERT',
                'SLA Monitor',
                {
                    'sla_instance_id': str(sla_instance.id),
                    'notification_type': 'sla_warning',
                    'threshold_percent': threshold_percent
                }
            )

            return True

        except Exception as e:
            print(f"Error sending SLA warning notification: {e}")
            return False

    @staticmethod
    def send_sla_breach_notification(sla_instance: SLAInstance):
        """Send an SLA breach notification to the incident chat room.

        Returns:
            True if the notification was posted, False otherwise.
        """
        try:
            war_room = SLANotificationService._get_war_room(sla_instance.incident)
            if not war_room:
                return False

            # How long ago the SLA target was missed, in whole minutes.
            breach_time = sla_instance.breach_time
            breach_minutes = int(breach_time.total_seconds() / 60)

            message_content = (
                f"🚨 **SLA BREACHED** 🚨\n\n"
                f"**SLA:** {sla_instance.sla_definition.name}\n"
                f"**Type:** {sla_instance.sla_definition.get_sla_type_display()}\n"
                f"**Breach Time:** {breach_minutes} minutes ago\n"
                f"**Target Time:** {sla_instance.target_time.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                f"**IMMEDIATE ACTION REQUIRED**\n"
                f"Escalation procedures have been triggered."
            )

            SLANotificationService._post_system_message(
                war_room,
                message_content,
                'ALERT',
                'SLA Monitor',
                {
                    'sla_instance_id': str(sla_instance.id),
                    'notification_type': 'sla_breach',
                    'breach_minutes': breach_minutes
                }
            )

            return True

        except Exception as e:
            print(f"Error sending SLA breach notification: {e}")
            return False

    @staticmethod
    def send_escalation_notification(escalation_instance: EscalationInstance):
        """Send an escalation notification to the incident chat room.

        Returns:
            True if the notification was posted, False otherwise.
        """
        try:
            war_room = SLANotificationService._get_war_room(escalation_instance.incident)
            if not war_room:
                return False

            message_content = (
                f"📢 **ESCALATION TRIGGERED** 📢\n\n"
                f"**Policy:** {escalation_instance.escalation_policy.name}\n"
                f"**Level:** {escalation_instance.escalation_level}\n"
                f"**Trigger:** {escalation_instance.escalation_policy.get_trigger_condition_display()}\n"
                f"**Time:** {escalation_instance.triggered_at.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                f"**Actions Taken:**\n"
            )

            # Append each recorded escalation action as a bullet line.
            for action in escalation_instance.actions_taken:
                message_content += f"• {action}\n"

            SLANotificationService._post_system_message(
                war_room,
                message_content,
                'ALERT',
                'Escalation System',
                {
                    'escalation_instance_id': str(escalation_instance.id),
                    'notification_type': 'escalation',
                    'escalation_level': escalation_instance.escalation_level
                }
            )

            return True

        except Exception as e:
            print(f"Error sending escalation notification: {e}")
            return False

    @staticmethod
    def send_oncall_handoff_notification(incident: Incident, old_oncall_user, new_oncall_user):
        """Send an on-call handoff notification to the incident chat room.

        Args:
            incident: Incident whose war room receives the message.
            old_oncall_user: Previous on-call user, or None (shown as
                'System') for the initial assignment.
            new_oncall_user: User taking over on-call duty.

        Returns:
            True if the notification was posted, False otherwise.
        """
        try:
            war_room = SLANotificationService._get_war_room(incident)
            if not war_room:
                return False

            message_content = (
                f"🔄 **ON-CALL HANDOFF** 🔄\n\n"
                f"**From:** {old_oncall_user.username if old_oncall_user else 'System'}\n"
                f"**To:** {new_oncall_user.username}\n"
                f"**Time:** {timezone.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                f"Please review the incident status and continue response activities."
            )

            SLANotificationService._post_system_message(
                war_room,
                message_content,
                'UPDATE',
                'On-Call System',
                {
                    'notification_type': 'oncall_handoff',
                    'old_oncall_user_id': str(old_oncall_user.id) if old_oncall_user else None,
                    'new_oncall_user_id': str(new_oncall_user.id)
                }
            )

            return True

        except Exception as e:
            print(f"Error sending on-call handoff notification: {e}")
            return False

    @staticmethod
    def send_sla_met_notification(sla_instance: SLAInstance):
        """Send an SLA-met (success) notification to the incident chat room.

        Returns:
            True if the notification was posted, False otherwise.
        """
        try:
            war_room = SLANotificationService._get_war_room(sla_instance.incident)
            if not war_room:
                return False

            # Response time may be unset; display 0 minutes in that case.
            response_time = sla_instance.response_time
            response_minutes = int(response_time.total_seconds() / 60) if response_time else 0

            message_content = (
                f"✅ **SLA MET** ✅\n\n"
                f"**SLA:** {sla_instance.sla_definition.name}\n"
                f"**Type:** {sla_instance.sla_definition.get_sla_type_display()}\n"
                f"**Response Time:** {response_minutes} minutes\n"
                f"**Target Time:** {sla_instance.target_time.strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                f"Great job meeting the SLA target!"
            )

            SLANotificationService._post_system_message(
                war_room,
                message_content,
                'UPDATE',
                'SLA Monitor',
                {
                    'sla_instance_id': str(sla_instance.id),
                    'notification_type': 'sla_met',
                    'response_minutes': response_minutes
                }
            )

            return True

        except Exception as e:
            print(f"Error sending SLA met notification: {e}")
            return False

    @staticmethod
    def get_sla_status_for_incident(incident: Incident) -> Dict[str, Any]:
        """Get an SLA status summary for an incident.

        Returns:
            Dict with aggregate counts (``total_slas``, ``active_slas``,
            ``met_slas``, ``breached_slas``) and a per-SLA ``sla_details``
            list, or a dict with an ``error`` key on failure. Times in the
            details are minutes; remaining/breach times are 0 when not
            applicable.
        """
        try:
            sla_instances = SLAInstance.objects.filter(incident=incident)

            status_summary = {
                'total_slas': sla_instances.count(),
                'active_slas': sla_instances.filter(status='ACTIVE').count(),
                'met_slas': sla_instances.filter(status='MET').count(),
                'breached_slas': sla_instances.filter(status='BREACHED').count(),
                'sla_details': []
            }

            for sla in sla_instances:
                sla_detail = {
                    'id': str(sla.id),
                    'name': sla.sla_definition.name,
                    'type': sla.sla_definition.get_sla_type_display(),
                    'status': sla.status,
                    'target_time': sla.target_time.isoformat(),
                    # Only active SLAs have meaningful remaining time.
                    'time_remaining': int(sla.time_remaining.total_seconds() / 60) if sla.status == 'ACTIVE' else 0,
                    'breach_time': int(sla.breach_time.total_seconds() / 60) if sla.is_breached else 0
                }
                status_summary['sla_details'].append(sla_detail)

            return status_summary

        except Exception as e:
            print(f"Error getting SLA status: {e}")
            return {'error': str(e)}

    @staticmethod
    def check_and_send_threshold_notifications():
        """Check all active SLAs and send threshold warning notifications.

        For every active SLA with escalation enabled, computes the elapsed
        fraction of the SLA window and sends a single warning per SLA once
        the configured threshold is crossed (deduplicated via the ALERT
        messages' ``external_data``).

        Returns:
            Number of warnings sent (0 on failure).
        """
        try:
            active_slas = SLAInstance.objects.filter(status='ACTIVE')
            notifications_sent = 0

            for sla in active_slas:
                if sla.sla_definition.escalation_enabled:
                    threshold_percent = sla.sla_definition.escalation_threshold_percent
                    time_elapsed = timezone.now() - sla.started_at
                    total_duration = sla.target_time - sla.started_at

                    # Guard against a zero/negative SLA window: previously a
                    # ZeroDivisionError here aborted the entire sweep.
                    if total_duration.total_seconds() <= 0:
                        continue

                    elapsed_percent = (time_elapsed.total_seconds() / total_duration.total_seconds()) * 100

                    if elapsed_percent >= threshold_percent:
                        # Deduplicate: send at most one warning per SLA.
                        existing_warning = WarRoomMessage.objects.filter(
                            war_room__incident=sla.incident,
                            message_type='ALERT',
                            external_data__notification_type='sla_warning',
                            external_data__sla_instance_id=str(sla.id)
                        ).exists()

                        if not existing_warning:
                            if SLANotificationService.send_sla_warning_notification(sla, threshold_percent):
                                notifications_sent += 1

            return notifications_sent

        except Exception as e:
            print(f"Error checking SLA thresholds: {e}")
            return 0
||||
Reference in New Issue
Block a user