Updates
This commit is contained in:
1
ETB-API/knowledge_learning/services/__init__.py
Normal file
1
ETB-API/knowledge_learning/services/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Knowledge Learning Services
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
422
ETB-API/knowledge_learning/services/knowledge_base_search.py
Normal file
422
ETB-API/knowledge_learning/services/knowledge_base_search.py
Normal file
@@ -0,0 +1,422 @@
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
from django.db.models import Q, Count
|
||||
from django.utils import timezone
|
||||
# from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
# from sklearn.metrics.pairwise import cosine_similarity
|
||||
import re
|
||||
|
||||
from ..models import KnowledgeBaseArticle, KnowledgeBaseUsage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class KnowledgeBaseSearchService:
    """Service for searching and discovering knowledge base articles."""

    def __init__(self):
        # Version tag recorded with results produced by this service.
        self.model_version = "v1.0"
        # Matches scoring below this similarity are filtered out.
        self.min_similarity_threshold = 0.1
    def search(
        self,
        query: str,
        article_types: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        difficulty_levels: Optional[List[str]] = None,
        limit: int = 20,
        offset: int = 0
    ) -> Dict[str, Any]:
        """Search published knowledge base articles with optional filters.

        Args:
            query: Free-text search string.
            article_types: Restrict to these article_type values, if given.
            categories: Restrict to these category values, if given.
            difficulty_levels: Restrict to these difficulty levels, if given.
            limit: Page size.
            offset: Page start index.

        Returns:
            Dict with 'results' (scored article summaries), 'total_count',
            the echoed 'query' and 'filters', and 'pagination' metadata.

        Raises:
            Exception: re-raised after logging on any search failure.
        """

        try:
            # Only published articles are searchable.
            queryset = KnowledgeBaseArticle.objects.filter(status='PUBLISHED')

            # Apply optional metadata filters.
            if article_types:
                queryset = queryset.filter(article_type__in=article_types)

            if categories:
                queryset = queryset.filter(category__in=categories)

            if difficulty_levels:
                queryset = queryset.filter(difficulty_level__in=difficulty_levels)

            # NOTE(review): this materialises every matching article in memory
            # to score it in Python — fine for small corpora, costly at scale.
            all_articles = list(queryset)

            if not all_articles:
                # Empty result envelope mirrors the success shape (minus pagination).
                return {
                    'results': [],
                    'total_count': 0,
                    'query': query,
                    'filters': {
                        'article_types': article_types,
                        'categories': categories,
                        'difficulty_levels': difficulty_levels
                    }
                }

            # Score every candidate against the query.
            articles_with_scores = self._calculate_similarity_scores(query, all_articles)

            # Rank by a 70/30 blend of query similarity and article popularity.
            articles_with_scores.sort(
                key=lambda x: (x['similarity_score'] * 0.7) + (x['popularity_score'] * 0.3),
                reverse=True
            )

            # Paginate after ranking so pages are stable for a given query.
            paginated_articles = articles_with_scores[offset:offset + limit]

            # Serialise the page into plain dicts for the API layer.
            results = []
            for article_data in paginated_articles:
                article = article_data['article']
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'summary': article.summary,
                    'article_type': article.article_type,
                    'category': article.category,
                    'subcategory': article.subcategory,
                    'tags': article.tags,
                    'difficulty_level': article.difficulty_level,
                    'view_count': article.view_count,
                    'created_at': article.created_at.isoformat(),
                    'updated_at': article.updated_at.isoformat(),
                    'author': article.author.username if article.author else None,
                    'similarity_score': article_data['similarity_score'],
                    'relevance_score': article_data['relevance_score'],
                    'popularity_score': article_data['popularity_score'],
                    'matching_keywords': article_data['matching_keywords']
                })

            return {
                'results': results,
                # total_count counts all scored candidates, not just this page.
                'total_count': len(articles_with_scores),
                'query': query,
                'filters': {
                    'article_types': article_types,
                    'categories': categories,
                    'difficulty_levels': difficulty_levels
                },
                'pagination': {
                    'limit': limit,
                    'offset': offset,
                    'has_more': (offset + limit) < len(articles_with_scores)
                }
            }

        except Exception as e:
            logger.error(f"Failed to search knowledge base: {str(e)}")
            raise
    def find_related_articles(
        self,
        article_id: str,
        limit: int = 5
    ) -> List[Dict[str, Any]]:
        """Find up to *limit* published articles related to *article_id*.

        Candidates share the source article's category, subcategory, tags
        or article type; they are then ranked by text similarity and only
        those at or above min_similarity_threshold are returned.

        Raises:
            ValueError: if no article with *article_id* exists.
        """

        try:
            article = KnowledgeBaseArticle.objects.get(id=article_id)

            # Broad candidate set: any published article sharing a facet.
            # NOTE(review): tags__overlap is a Postgres ArrayField lookup —
            # this query assumes a PostgreSQL backend; confirm in settings.
            related_articles = KnowledgeBaseArticle.objects.filter(
                status='PUBLISHED'
            ).exclude(id=article_id).filter(
                Q(category=article.category) |
                Q(subcategory=article.subcategory) |
                Q(tags__overlap=article.tags) |
                Q(article_type=article.article_type)
            ).distinct()

            if not related_articles.exists():
                return []

            # Compare title + summary + tags of the source vs each candidate.
            article_text = f"{article.title} {article.summary} {' '.join(article.tags)}"
            articles_with_scores = []

            for related_article in related_articles:
                related_text = f"{related_article.title} {related_article.summary} {' '.join(related_article.tags)}"
                similarity = self._calculate_text_similarity(article_text, related_text)

                # Drop weak matches below the service-wide threshold.
                if similarity >= self.min_similarity_threshold:
                    articles_with_scores.append({
                        'article': related_article,
                        'similarity_score': similarity
                    })

            # Best matches first.
            articles_with_scores.sort(key=lambda x: x['similarity_score'], reverse=True)

            # Serialise only the top *limit* matches.
            results = []
            for article_data in articles_with_scores[:limit]:
                article = article_data['article']
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'summary': article.summary,
                    'article_type': article.article_type,
                    'category': article.category,
                    'similarity_score': article_data['similarity_score']
                })

            return results

        except KnowledgeBaseArticle.DoesNotExist:
            raise ValueError(f"Article with ID {article_id} not found")
        except Exception as e:
            logger.error(f"Failed to find related articles: {str(e)}")
            raise
def suggest_articles_for_incident(
|
||||
self,
|
||||
incident_title: str,
|
||||
incident_description: str,
|
||||
incident_category: str,
|
||||
limit: int = 5
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Suggest knowledge base articles for an incident"""
|
||||
|
||||
try:
|
||||
# Build search query from incident data
|
||||
search_query = f"{incident_title} {incident_description} {incident_category}"
|
||||
|
||||
# Search for relevant articles
|
||||
search_results = self.search(
|
||||
query=search_query,
|
||||
categories=[incident_category] if incident_category else None,
|
||||
limit=limit * 2 # Get more results to filter
|
||||
)
|
||||
|
||||
# Filter and rank results
|
||||
relevant_articles = []
|
||||
for result in search_results['results']:
|
||||
# Boost score for category matches
|
||||
category_boost = 0.2 if result['category'] == incident_category else 0.0
|
||||
|
||||
# Boost score for runbooks and troubleshooting guides
|
||||
type_boost = 0.1 if result['article_type'] in ['RUNBOOK', 'TROUBLESHOOTING'] else 0.0
|
||||
|
||||
final_score = result['similarity_score'] + category_boost + type_boost
|
||||
|
||||
if final_score >= self.min_similarity_threshold:
|
||||
relevant_articles.append({
|
||||
**result,
|
||||
'final_score': final_score
|
||||
})
|
||||
|
||||
# Sort by final score and return top matches
|
||||
relevant_articles.sort(key=lambda x: x['final_score'], reverse=True)
|
||||
|
||||
return relevant_articles[:limit]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to suggest articles for incident: {str(e)}")
|
||||
raise
|
||||
|
||||
def get_popular_articles(
|
||||
self,
|
||||
category: Optional[str] = None,
|
||||
article_type: Optional[str] = None,
|
||||
limit: int = 10
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Get popular articles based on view count and recent activity"""
|
||||
|
||||
try:
|
||||
queryset = KnowledgeBaseArticle.objects.filter(status='PUBLISHED')
|
||||
|
||||
if category:
|
||||
queryset = queryset.filter(category=category)
|
||||
|
||||
if article_type:
|
||||
queryset = queryset.filter(article_type=article_type)
|
||||
|
||||
# Get articles ordered by popularity (view count + recent activity)
|
||||
popular_articles = queryset.order_by('-view_count', '-updated_at')[:limit]
|
||||
|
||||
results = []
|
||||
for article in popular_articles:
|
||||
results.append({
|
||||
'id': str(article.id),
|
||||
'title': article.title,
|
||||
'slug': article.slug,
|
||||
'summary': article.summary,
|
||||
'article_type': article.article_type,
|
||||
'category': article.category,
|
||||
'view_count': article.view_count,
|
||||
'updated_at': article.updated_at.isoformat(),
|
||||
'is_featured': article.is_featured
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get popular articles: {str(e)}")
|
||||
raise
|
||||
|
||||
def get_articles_due_for_review(self) -> List[Dict[str, Any]]:
|
||||
"""Get articles that are due for review"""
|
||||
|
||||
try:
|
||||
due_articles = KnowledgeBaseArticle.objects.filter(
|
||||
next_review_due__lt=timezone.now(),
|
||||
status='PUBLISHED'
|
||||
).order_by('next_review_due')
|
||||
|
||||
results = []
|
||||
for article in due_articles:
|
||||
results.append({
|
||||
'id': str(article.id),
|
||||
'title': article.title,
|
||||
'slug': article.slug,
|
||||
'category': article.category,
|
||||
'last_reviewed': article.last_reviewed.isoformat() if article.last_reviewed else None,
|
||||
'next_review_due': article.next_review_due.isoformat(),
|
||||
'maintainer': article.maintainer.username if article.maintainer else None,
|
||||
'days_overdue': (timezone.now() - article.next_review_due).days
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get articles due for review: {str(e)}")
|
||||
raise
|
||||
|
||||
def _calculate_similarity_scores(
|
||||
self,
|
||||
query: str,
|
||||
articles: List[KnowledgeBaseArticle]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Calculate similarity scores for articles against a query"""
|
||||
|
||||
if not articles:
|
||||
return []
|
||||
|
||||
# Prepare texts for similarity calculation
|
||||
query_text = self._preprocess_text(query)
|
||||
article_texts = []
|
||||
|
||||
for article in articles:
|
||||
article_text = f"{article.title} {article.summary} {' '.join(article.tags)} {' '.join(article.search_keywords)}"
|
||||
article_texts.append(self._preprocess_text(article_text))
|
||||
|
||||
# Calculate similarity using simple keyword matching (fallback)
|
||||
try:
|
||||
similarities = [self._calculate_keyword_similarity(query, article_text) for article_text in article_texts]
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to calculate similarity: {str(e)}")
|
||||
similarities = [0.0] * len(article_texts)
|
||||
|
||||
# Prepare results with additional scoring
|
||||
results = []
|
||||
for i, article in enumerate(articles):
|
||||
similarity_score = float(similarities[i])
|
||||
popularity_score = self._calculate_popularity_score(article)
|
||||
relevance_score = (similarity_score * 0.7) + (popularity_score * 0.3)
|
||||
|
||||
matching_keywords = self._find_matching_keywords(query, article)
|
||||
|
||||
results.append({
|
||||
'article': article,
|
||||
'similarity_score': similarity_score,
|
||||
'popularity_score': popularity_score,
|
||||
'relevance_score': relevance_score,
|
||||
'matching_keywords': matching_keywords
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
def _calculate_text_similarity(self, text1: str, text2: str) -> float:
|
||||
"""Calculate text similarity using simple keyword matching (fallback)"""
|
||||
try:
|
||||
if not text1.strip() or not text2.strip():
|
||||
return 0.0
|
||||
|
||||
# Simple keyword-based similarity as fallback
|
||||
words1 = set(text1.lower().split())
|
||||
words2 = set(text2.lower().split())
|
||||
|
||||
if not words1 or not words2:
|
||||
return 0.0
|
||||
|
||||
intersection = words1.intersection(words2)
|
||||
union = words1.union(words2)
|
||||
|
||||
return len(intersection) / len(union) if union else 0.0
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to calculate text similarity: {str(e)}")
|
||||
return 0.0
|
||||
|
||||
def _calculate_keyword_similarity(self, query: str, article_text: str) -> float:
|
||||
"""Fallback similarity calculation using keyword matching"""
|
||||
query_words = set(self._extract_keywords(query.lower()))
|
||||
article_words = set(self._extract_keywords(article_text.lower()))
|
||||
|
||||
if not query_words or not article_words:
|
||||
return 0.0
|
||||
|
||||
intersection = query_words.intersection(article_words)
|
||||
union = query_words.union(article_words)
|
||||
|
||||
return len(intersection) / len(union) if union else 0.0
|
||||
|
||||
def _calculate_popularity_score(self, article: KnowledgeBaseArticle) -> float:
|
||||
"""Calculate popularity score based on views and recency"""
|
||||
# Normalize view count (assuming max views is around 1000)
|
||||
view_score = min(article.view_count / 1000.0, 1.0)
|
||||
|
||||
# Calculate recency score (more recent = higher score)
|
||||
days_since_update = (timezone.now() - article.updated_at).days
|
||||
recency_score = max(0, 1 - (days_since_update / 365.0)) # Decay over a year
|
||||
|
||||
# Featured articles get a boost
|
||||
featured_boost = 0.1 if article.is_featured else 0.0
|
||||
|
||||
return (view_score * 0.6) + (recency_score * 0.3) + featured_boost
|
||||
|
||||
def _preprocess_text(self, text: str) -> str:
|
||||
"""Preprocess text for similarity calculation"""
|
||||
# Convert to lowercase
|
||||
text = text.lower()
|
||||
|
||||
# Remove special characters but keep spaces
|
||||
text = re.sub(r'[^\w\s]', ' ', text)
|
||||
|
||||
# Remove extra whitespace
|
||||
text = re.sub(r'\s+', ' ', text).strip()
|
||||
|
||||
return text
|
||||
|
||||
def _extract_keywords(self, text: str) -> List[str]:
|
||||
"""Extract keywords from text"""
|
||||
# Simple keyword extraction - in production, you might use more sophisticated methods
|
||||
words = text.split()
|
||||
|
||||
# Filter out common stop words
|
||||
stop_words = {
|
||||
'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
|
||||
'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have',
|
||||
'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should'
|
||||
}
|
||||
|
||||
keywords = [word for word in words if len(word) > 2 and word not in stop_words]
|
||||
return keywords
|
||||
|
||||
def _find_matching_keywords(self, query: str, article: KnowledgeBaseArticle) -> List[str]:
|
||||
"""Find keywords that match between query and article"""
|
||||
query_keywords = set(self._extract_keywords(query.lower()))
|
||||
|
||||
# Check article title, summary, tags, and search keywords
|
||||
article_text = f"{article.title} {article.summary} {' '.join(article.tags)} {' '.join(article.search_keywords)}"
|
||||
article_keywords = set(self._extract_keywords(article_text.lower()))
|
||||
|
||||
matching_keywords = list(query_keywords.intersection(article_keywords))
|
||||
return matching_keywords[:5] # Return top 5 matches
|
||||
361
ETB-API/knowledge_learning/services/postmortem_generator.py
Normal file
361
ETB-API/knowledge_learning/services/postmortem_generator.py
Normal file
@@ -0,0 +1,361 @@
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Any, Optional
|
||||
from django.utils import timezone
|
||||
from django.db import transaction
|
||||
|
||||
from ..models import Postmortem, AutomatedPostmortemGeneration
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PostmortemGenerator:
    """Service for generating automated postmortems from incident data."""

    def __init__(self):
        # Version tag for the content-generation logic.
        self.model_version = "v1.0"
    def generate_postmortem_for_incident(
        self,
        incident_id: str,
        include_timeline: bool = True,
        include_logs: bool = True,
        trigger: str = "manual"
    ) -> Dict[str, Any]:
        """Generate and persist a draft postmortem for one incident.

        Creates an AutomatedPostmortemGeneration audit row, generates the
        content, saves a DRAFT Postmortem, and marks the audit row
        COMPLETED (or FAILED on error).

        Args:
            incident_id: Primary key of the incident.
            include_timeline: Whether to capture timeline data in the log.
            include_logs: Whether to capture log data in the log.
            trigger: Free-text source of the request (e.g. "manual").

        Returns:
            Dict with 'generation_id', 'postmortem_id', 'status' and
            'confidence_score'.

        Raises:
            ValueError: if the incident does not exist.
            Exception: re-raised after logging and marking the audit row FAILED.
        """

        try:
            incident = Incident.objects.get(id=incident_id)

            # Audit row first, so a failure mid-generation is still traceable.
            generation_log = AutomatedPostmortemGeneration.objects.create(
                incident=incident,
                status='PROCESSING',
                generation_trigger=trigger,
                incident_data=self._extract_incident_data(incident),
                timeline_data=self._extract_timeline_data(incident) if include_timeline else [],
                log_data=self._extract_log_data(incident) if include_logs else []
            )

            # Produce all postmortem sections (template-based for now).
            generated_content = self._generate_content(incident, generation_log)

            # Persist the draft; .get() defaults guard against missing sections.
            postmortem = Postmortem.objects.create(
                title=f"Postmortem: {incident.title}",
                incident=incident,
                executive_summary=generated_content.get('executive_summary', ''),
                timeline=generated_content.get('timeline', []),
                root_cause_analysis=generated_content.get('root_cause_analysis', ''),
                impact_assessment=generated_content.get('impact_assessment', ''),
                lessons_learned=generated_content.get('lessons_learned', ''),
                action_items=generated_content.get('action_items', []),
                is_automated=True,
                generation_confidence=generated_content.get('confidence_score', 0.0),
                auto_generated_sections=generated_content.get('generated_sections', []),
                status='DRAFT',
                severity=incident.severity,
                affected_services=self._extract_affected_services(incident),
                affected_teams=self._extract_affected_teams(incident),
                due_date=timezone.now() + timedelta(days=7)  # Due in 7 days
            )

            # Close out the audit row with the generated artefacts.
            generation_log.status = 'COMPLETED'
            generation_log.generated_content = generated_content
            generation_log.generated_postmortem = postmortem
            generation_log.confidence_scores = generated_content.get('confidence_scores', {})
            generation_log.quality_metrics = generated_content.get('quality_metrics', {})
            generation_log.completed_at = timezone.now()
            generation_log.save()

            return {
                'generation_id': str(generation_log.id),
                'postmortem_id': str(postmortem.id),
                'status': 'completed',
                'confidence_score': postmortem.generation_confidence
            }

        except Incident.DoesNotExist:
            raise ValueError(f"Incident with ID {incident_id} not found")
        except Exception as e:
            logger.error(f"Failed to generate postmortem for incident {incident_id}: {str(e)}")

            # Mark the audit row FAILED, but only if we got far enough to
            # create it — hence the locals() check.
            if 'generation_log' in locals():
                generation_log.status = 'FAILED'
                generation_log.error_message = str(e)
                generation_log.completed_at = timezone.now()
                generation_log.save()

            raise
    def generate_postmortem(self, incident: Incident) -> Dict[str, Any]:
        """Generate postmortem content for an incident without persisting.

        Thin wrapper around _generate_content() with no generation log;
        the caller receives the raw content dict only.
        """
        return self._generate_content(incident, None)
    def _generate_content(self, incident: Incident, generation_log: Optional[AutomatedPostmortemGeneration] = None) -> Dict[str, Any]:
        """Generate the actual postmortem content.

        Simplified template-based implementation — in production this would
        delegate to an AI/ML service (OpenAI, Azure Cognitive Services, or
        a custom model). *generation_log* is currently unused.

        Returns:
            Dict with one entry per section plus 'confidence_score',
            'generated_sections', per-section 'confidence_scores' and
            'quality_metrics'.
        """

        content = {
            'executive_summary': self._generate_executive_summary(incident),
            'timeline': self._generate_timeline(incident),
            'root_cause_analysis': self._generate_root_cause_analysis(incident),
            'impact_assessment': self._generate_impact_assessment(incident),
            'lessons_learned': self._generate_lessons_learned(incident),
            'action_items': self._generate_action_items(incident),
            'confidence_score': self._calculate_confidence_score(incident),
            'generated_sections': ['executive_summary', 'timeline', 'root_cause_analysis', 'impact_assessment', 'lessons_learned', 'action_items'],
            # Static placeholder confidences — not computed from the content.
            'confidence_scores': {
                'executive_summary': 0.85,
                'timeline': 0.90,
                'root_cause_analysis': 0.75,
                'impact_assessment': 0.80,
                'lessons_learned': 0.70,
                'action_items': 0.75
            },
            # Static placeholder quality metrics.
            'quality_metrics': {
                'completeness': 0.85,
                'accuracy': 0.80,
                'actionability': 0.75
            }
        }

        return content
    def _generate_executive_summary(self, incident: Incident) -> str:
        """Render the executive-summary section as a template string.

        The returned text is user-facing content; it embeds incident
        fields directly, so it reflects whatever is on the record at
        generation time.
        """
        return f"""
        On {incident.created_at.strftime('%Y-%m-%d %H:%M')}, a {incident.severity.lower()} severity incident occurred affecting {incident.affected_users} users.
        The incident was categorized as {incident.category} and was resolved after {self._calculate_resolution_time(incident)}.

        Key Impact:
        - {incident.affected_users} users affected
        - Business impact: {incident.business_impact or 'Not specified'}
        - Resolution time: {self._calculate_resolution_time(incident)}

        This postmortem outlines the timeline, root causes, and preventive measures to avoid similar incidents in the future.
        """
def _generate_timeline(self, incident: Incident) -> List[Dict[str, Any]]:
|
||||
"""Generate incident timeline"""
|
||||
timeline = [
|
||||
{
|
||||
'timestamp': incident.created_at.isoformat(),
|
||||
'event': 'Incident reported',
|
||||
'description': f'Incident "{incident.title}" was reported',
|
||||
'actor': incident.reporter.username if incident.reporter else 'System'
|
||||
}
|
||||
]
|
||||
|
||||
if incident.assigned_to:
|
||||
timeline.append({
|
||||
'timestamp': incident.updated_at.isoformat(),
|
||||
'event': 'Incident assigned',
|
||||
'description': f'Incident assigned to {incident.assigned_to.username}',
|
||||
'actor': 'System'
|
||||
})
|
||||
|
||||
if incident.resolved_at:
|
||||
timeline.append({
|
||||
'timestamp': incident.resolved_at.isoformat(),
|
||||
'event': 'Incident resolved',
|
||||
'description': f'Incident resolved with status: {incident.status}',
|
||||
'actor': incident.assigned_to.username if incident.assigned_to else 'System'
|
||||
})
|
||||
|
||||
return timeline
|
||||
|
||||
    def _generate_root_cause_analysis(self, incident: Incident) -> str:
        """Render the root-cause-analysis section as a template string.

        Template-driven: it restates incident metadata rather than doing
        any actual causal analysis; the closing recommendations point a
        human at the real investigation.
        """
        return f"""
        Root Cause Analysis for {incident.title}:

        Primary Factors:
        1. Technical Issue: {incident.category} - {incident.subcategory or 'Not specified'}
        2. System Component: {incident.description[:200]}...
        3. User Impact: {incident.affected_users} users affected

        Contributing Factors:
        - Incident severity: {incident.severity}
        - Priority level: {incident.priority}
        - Resolution time: {self._calculate_resolution_time(incident)}

        Analysis:
        Based on the incident details and timeline, the root cause appears to be related to {incident.category}.
        The incident was classified with {incident.classification_confidence or 0.0:.2%} confidence,
        suggesting {incident.subcategory or 'a system component failure'} as the primary cause.

        Recommendations for further investigation:
        1. Review system logs for the time period {incident.created_at} to {incident.resolved_at or incident.updated_at}
        2. Analyze similar incidents in the past 30 days
        3. Check for any recent deployments or configuration changes
        """
    def _generate_impact_assessment(self, incident: Incident) -> str:
        """Render the impact-assessment section as a template string.

        Covers user, business, technical, financial and reputation impact;
        financial/reputation lines are fixed boilerplate, not computed.
        """
        return f"""
        Impact Assessment for {incident.title}:

        User Impact:
        - Total users affected: {incident.affected_users}
        - Severity level: {incident.severity}
        - Duration of impact: {self._calculate_resolution_time(incident)}

        Business Impact:
        {incident.business_impact or 'Business impact not specified in incident details.'}

        Technical Impact:
        - Affected services: {', '.join(self._extract_affected_services(incident))}
        - System components: {incident.category} - {incident.subcategory or 'Not specified'}
        - Estimated downtime: {incident.estimated_downtime or 'Not specified'}

        Financial Impact:
        - Estimated cost: To be calculated based on user impact and downtime
        - SLA impact: {'SLA override applied' if incident.sla_override else 'Normal SLA applies'}

        Reputation Impact:
        - Customer satisfaction: Potentially affected due to service disruption
        - Trust level: Impact depends on resolution time and communication
        """
def _generate_lessons_learned(self, incident: Incident) -> str:
|
||||
"""Generate lessons learned"""
|
||||
return f"""
|
||||
Lessons Learned from {incident.title}:
|
||||
|
||||
What Went Well:
|
||||
1. Incident was properly categorized as {incident.category}
|
||||
2. {'Automated remediation was attempted' if incident.auto_remediation_attempted else 'Manual intervention was required'}
|
||||
3. {'Runbook was suggested' if incident.runbook_suggested else 'No runbook was available'}
|
||||
|
||||
What Could Be Improved:
|
||||
1. {'Faster detection and response time needed' if self._calculate_resolution_time(incident) > timedelta(hours=1) else 'Response time was acceptable'}
|
||||
2. {'Better automation coverage needed' if not incident.auto_remediation_attempted else 'Automation worked as expected'}
|
||||
3. {'More detailed incident description needed' if len(incident.description) < 100 else 'Incident description was adequate'}
|
||||
|
||||
Key Insights:
|
||||
1. {incident.category} incidents require {'immediate' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'standard'} response procedures
|
||||
2. {'Automation can help' if incident.automation_enabled else 'Manual processes need improvement'} in similar scenarios
|
||||
3. {'SLA override was necessary' if incident.sla_override else 'Standard SLA procedures were sufficient'}
|
||||
|
||||
Process Improvements:
|
||||
1. Review and update runbooks for {incident.category} incidents
|
||||
2. {'Enhance monitoring' if incident.severity in ['HIGH', 'CRITICAL', 'EMERGENCY'] else 'Maintain current monitoring'} for early detection
|
||||
3. {'Improve automation' if not incident.auto_remediation_attempted else 'Automation is working well'} for faster resolution
|
||||
"""
|
||||
|
||||
def _generate_action_items(self, incident: Incident) -> List[Dict[str, Any]]:
|
||||
"""Generate action items"""
|
||||
action_items = [
|
||||
{
|
||||
'title': f'Review and update {incident.category} runbook',
|
||||
'description': f'Update the runbook for {incident.category} incidents based on lessons learned',
|
||||
'priority': 'HIGH' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'MEDIUM',
|
||||
'assignee': 'TBD',
|
||||
'due_date': (timezone.now() + timedelta(days=14)).isoformat(),
|
||||
'category': 'Process Improvement'
|
||||
},
|
||||
{
|
||||
'title': 'Enhance monitoring and alerting',
|
||||
'description': f'Improve monitoring for {incident.category} to detect similar issues earlier',
|
||||
'priority': 'MEDIUM',
|
||||
'assignee': 'TBD',
|
||||
'due_date': (timezone.now() + timedelta(days=21)).isoformat(),
|
||||
'category': 'Technical Improvement'
|
||||
}
|
||||
]
|
||||
|
||||
if not incident.auto_remediation_attempted and incident.automation_enabled:
|
||||
action_items.append({
|
||||
'title': 'Implement automated remediation',
|
||||
'description': f'Develop automated remediation for {incident.category} incidents',
|
||||
'priority': 'HIGH' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'MEDIUM',
|
||||
'assignee': 'TBD',
|
||||
'due_date': (timezone.now() + timedelta(days=30)).isoformat(),
|
||||
'category': 'Automation'
|
||||
})
|
||||
|
||||
return action_items
|
||||
|
||||
def _calculate_confidence_score(self, incident: Incident) -> float:
|
||||
"""Calculate overall confidence score for the generated postmortem"""
|
||||
base_confidence = 0.7
|
||||
|
||||
# Adjust based on incident data quality
|
||||
if incident.classification_confidence:
|
||||
base_confidence += incident.classification_confidence * 0.2
|
||||
|
||||
if len(incident.description) > 200:
|
||||
base_confidence += 0.1
|
||||
|
||||
if incident.business_impact:
|
||||
base_confidence += 0.1
|
||||
|
||||
return min(base_confidence, 1.0)
|
||||
|
||||
def _calculate_resolution_time(self, incident: Incident) -> str:
|
||||
"""Calculate resolution time"""
|
||||
if incident.resolved_at and incident.created_at:
|
||||
duration = incident.resolved_at - incident.created_at
|
||||
return str(duration)
|
||||
return "Not resolved yet"
|
||||
|
||||
    def _extract_incident_data(self, incident: Incident) -> Dict[str, Any]:
        """Snapshot the incident's fields as a JSON-serialisable dict.

        Stored on the AutomatedPostmortemGeneration audit row so the
        generation inputs remain inspectable later.
        """
        return {
            'id': str(incident.id),
            'title': incident.title,
            'description': incident.description,
            'category': incident.category,
            'subcategory': incident.subcategory,
            'severity': incident.severity,
            'priority': incident.priority,
            'status': incident.status,
            'affected_users': incident.affected_users,
            'business_impact': incident.business_impact,
            'created_at': incident.created_at.isoformat(),
            'resolved_at': incident.resolved_at.isoformat() if incident.resolved_at else None,
            'assigned_to': incident.assigned_to.username if incident.assigned_to else None,
            'reporter': incident.reporter.username if incident.reporter else None
        }
    def _extract_timeline_data(self, incident: Incident) -> List[Dict[str, Any]]:
        """Extract timeline data for the incident.

        Placeholder: reuses the generated timeline. A real implementation
        would extract from incident logs, comments, etc.
        """
        return self._generate_timeline(incident)
def _extract_log_data(self, incident: Incident) -> List[Dict[str, Any]]:
|
||||
"""Extract relevant log data for the incident"""
|
||||
# In a real implementation, this would query log systems
|
||||
return [
|
||||
{
|
||||
'timestamp': incident.created_at.isoformat(),
|
||||
'level': 'ERROR',
|
||||
'message': f'Incident {incident.title} reported',
|
||||
'source': 'incident_system'
|
||||
}
|
||||
]
|
||||
|
||||
def _extract_affected_services(self, incident: Incident) -> List[str]:
|
||||
"""Extract affected services from incident"""
|
||||
services = []
|
||||
if incident.category:
|
||||
services.append(incident.category)
|
||||
if incident.subcategory:
|
||||
services.append(incident.subcategory)
|
||||
return services
|
||||
|
||||
def _extract_affected_teams(self, incident: Incident) -> List[str]:
|
||||
"""Extract affected teams from incident"""
|
||||
teams = []
|
||||
if incident.assigned_to:
|
||||
teams.append(incident.assigned_to.username)
|
||||
if incident.reporter:
|
||||
teams.append(incident.reporter.username)
|
||||
return teams
|
||||
458
ETB-API/knowledge_learning/services/recommendation_engine.py
Normal file
458
ETB-API/knowledge_learning/services/recommendation_engine.py
Normal file
@@ -0,0 +1,458 @@
|
||||
import logging
|
||||
from typing import Dict, List, Any, Optional
|
||||
from datetime import datetime, timedelta
|
||||
from django.db.models import Q, Count, Avg
|
||||
from django.utils import timezone
|
||||
# from sklearn.feature_extraction.text import TfidfVectorizer
|
||||
# from sklearn.metrics.pairwise import cosine_similarity
|
||||
# import numpy as np
|
||||
|
||||
from ..models import IncidentRecommendation, KnowledgeBaseArticle, LearningPattern
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RecommendationEngine:
    """Service for generating incident recommendations based on similarity and patterns"""

    def __init__(self):
        # Version tag persisted on every recommendation for traceability.
        self.model_version = "v1.0"
        # Minimum combined similarity for a candidate to be considered at all.
        self.min_similarity_threshold = 0.3
        # Upper bound on candidates gathered per pass.
        self.max_recommendations = 10
|
||||
|
||||
def generate_recommendations(
    self,
    incident_id: str,
    recommendation_types: Optional[List[str]] = None,
    max_recommendations: int = 5,
    min_confidence: float = 0.5
) -> List[Dict[str, Any]]:
    """Generate recommendations for a specific incident.

    Collects candidates of each requested type, drops those below
    *min_confidence*, persists the best *max_recommendations*, and
    returns a summary of the saved records.

    Raises:
        ValueError: if no incident matches *incident_id*.
    """
    try:
        incident = Incident.objects.get(id=incident_id)

        # Map each recommendation type to its finder. Unknown types are
        # ignored, matching the behaviour of the original if/elif chain;
        # insertion order doubles as the default type order.
        finders = {
            'SIMILAR_INCIDENT': self._find_similar_incidents,
            'KNOWLEDGE_ARTICLE': self._find_relevant_knowledge_articles,
            'SOLUTION': self._find_solutions,
            'EXPERT': self._find_experts,
            'PREVENTION': self._find_prevention_strategies,
        }
        requested = recommendation_types or list(finders)

        candidates = []
        for rec_type in requested:
            finder = finders.get(rec_type)
            if finder is not None:
                candidates.extend(finder(incident, max_recommendations))

        # Keep only confident candidates, best first.
        candidates = [c for c in candidates if c['confidence_score'] >= min_confidence]
        candidates.sort(key=lambda c: c['confidence_score'], reverse=True)

        # Persist the winners and summarise them for the caller.
        saved = []
        for rec_data in candidates[:max_recommendations]:
            record = self._save_recommendation(incident, rec_data)
            saved.append({
                'id': str(record.id),
                'title': record.title,
                'type': record.recommendation_type,
                'confidence_score': record.confidence_score,
                'similarity_score': record.similarity_score
            })

        return saved

    except Incident.DoesNotExist:
        raise ValueError(f"Incident with ID {incident_id} not found")
    except Exception as e:
        logger.error(f"Failed to generate recommendations for incident {incident_id}: {str(e)}")
        raise
|
||||
|
||||
def _find_similar_incidents(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
    """Find similar incidents based on content and metadata.

    Scans RESOLVED/CLOSED incidents from the last 180 days, scores each
    against *incident* (70% text similarity, 30% metadata similarity),
    and returns up to *limit* SIMILAR_INCIDENT recommendation dicts,
    best match first.
    """

    # Get resolved incidents from the past 6 months
    six_months_ago = timezone.now() - timedelta(days=180)
    similar_incidents = Incident.objects.filter(
        status__in=['RESOLVED', 'CLOSED'],
        created_at__gte=six_months_ago
    ).exclude(id=incident.id)

    if not similar_incidents.exists():
        return []

    # Build a single text blob per incident for keyword-based comparison.
    incident_text = f"{incident.title} {incident.description} {incident.category} {incident.subcategory or ''}"

    similarities = []
    for similar_incident in similar_incidents:
        similar_text = f"{similar_incident.title} {similar_incident.description} {similar_incident.category} {similar_incident.subcategory or ''}"

        # Calculate text similarity
        text_similarity = self._calculate_text_similarity(incident_text, similar_text)

        # Calculate metadata similarity
        metadata_similarity = self._calculate_metadata_similarity(incident, similar_incident)

        # Combined similarity score: text dominates (70/30 split).
        combined_similarity = (text_similarity * 0.7) + (metadata_similarity * 0.3)

        if combined_similarity >= self.min_similarity_threshold:
            similarities.append({
                'incident': similar_incident,
                'similarity_score': combined_similarity,
                'text_similarity': text_similarity,
                'metadata_similarity': metadata_similarity
            })

    # Sort by similarity and return top matches
    similarities.sort(key=lambda x: x['similarity_score'], reverse=True)

    recommendations = []
    for sim_data in similarities[:limit]:
        similar_incident = sim_data['incident']

        recommendations.append({
            'recommendation_type': 'SIMILAR_INCIDENT',
            'title': f'Similar Incident: {similar_incident.title}',
            'description': f'This incident is similar to {similar_incident.title} which was resolved on {similar_incident.resolved_at.strftime("%Y-%m-%d") if similar_incident.resolved_at else "unknown date"}.',
            'similarity_score': sim_data['similarity_score'],
            # Confidence is boosted 20% over raw similarity, capped at 1.0.
            'confidence_score': min(sim_data['similarity_score'] * 1.2, 1.0),
            'confidence_level': self._get_confidence_level(sim_data['similarity_score']),
            'related_incident_id': str(similar_incident.id),
            'suggested_actions': [
                f'Review how {similar_incident.title} was resolved',
                'Check if the same resolution approach applies',
                'Contact the incident assignee for insights'
            ],
            'expected_outcome': 'Faster resolution by applying proven solutions',
            'reasoning': f'Incidents are similar based on content ({sim_data["text_similarity"]:.2%}) and metadata ({sim_data["metadata_similarity"]:.2%})',
            'matching_factors': [
                f'Category: {similar_incident.category}',
                f'Severity: {similar_incident.severity}',
                f'Text similarity: {sim_data["text_similarity"]:.2%}'
            ]
        })

    return recommendations
|
||||
|
||||
def _find_relevant_knowledge_articles(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
    """Find relevant knowledge base articles.

    Scores every published article against the incident text and returns
    up to *limit* KNOWLEDGE_ARTICLE recommendation dicts, most relevant
    first.
    """

    articles = KnowledgeBaseArticle.objects.filter(
        status='PUBLISHED'
    ).order_by('-view_count', '-updated_at')

    if not articles.exists():
        return []

    incident_text = f"{incident.title} {incident.description} {incident.category}"

    # BUG FIX: previously only the first `limit` articles (ranked purely by
    # view count) were scored, so highly relevant but less-viewed articles
    # were never considered, and the returned list was not ordered by
    # relevance. Score all published articles, then keep the most similar.
    scored = []
    for article in articles:
        article_text = f"{article.title} {article.summary} {' '.join(article.tags)}"
        similarity = self._calculate_text_similarity(incident_text, article_text)
        if similarity >= self.min_similarity_threshold:
            scored.append((similarity, article))

    # Stable sort on the score only (articles are not comparable).
    scored.sort(key=lambda pair: pair[0], reverse=True)

    recommendations = []
    for similarity, article in scored[:limit]:
        recommendations.append({
            'recommendation_type': 'KNOWLEDGE_ARTICLE',
            'title': f'Knowledge Article: {article.title}',
            'description': f'This knowledge base article may help resolve the incident: {article.summary}',
            'similarity_score': similarity,
            # Confidence boosted 10% over raw similarity, capped at 1.0.
            'confidence_score': min(similarity * 1.1, 1.0),
            'confidence_level': self._get_confidence_level(similarity),
            'knowledge_article_id': str(article.id),
            'suggested_actions': [
                f'Read the article: {article.title}',
                'Follow the procedures outlined in the article',
                'Apply the troubleshooting steps if applicable'
            ],
            'expected_outcome': 'Faster resolution using documented procedures',
            'reasoning': f'Article is relevant based on content similarity ({similarity:.2%}) and category match',
            'matching_factors': [
                f'Category: {article.category}',
                f'Type: {article.article_type}',
                f'Difficulty: {article.difficulty_level}',
                f'Views: {article.view_count}'
            ]
        })

    return recommendations
|
||||
|
||||
def _find_solutions(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
    """Find solutions from resolved similar incidents.

    Surfaces up to *limit* incidents in the same category resolved within
    the last six months as SOLUTION recommendations.
    """

    # Look for resolved incidents with similar characteristics
    six_months_ago = timezone.now() - timedelta(days=180)
    resolved_incidents = Incident.objects.filter(
        status__in=['RESOLVED', 'CLOSED'],
        category=incident.category,
        created_at__gte=six_months_ago
    ).exclude(id=incident.id)

    if not resolved_incidents.exists():
        return []

    recommendations = []
    for resolved_incident in resolved_incidents[:limit]:
        # BUG FIX: resolution time could be None when resolved_at is unset,
        # which rendered as "resolved in None" in user-facing text.
        if resolved_incident.resolved_at:
            time_label = str(resolved_incident.resolved_at - resolved_incident.created_at)
        else:
            time_label = 'an unknown amount of time'

        recommendations.append({
            'recommendation_type': 'SOLUTION',
            'title': f'Solution from {resolved_incident.title}',
            'description': f'A similar incident was resolved in {time_label} using standard procedures.',
            'similarity_score': 0.8,  # High similarity for same category
            'confidence_score': 0.85,
            'confidence_level': 'HIGH',
            'related_incident_id': str(resolved_incident.id),
            'suggested_actions': [
                'Follow the same resolution approach used for the similar incident',
                'Check if the same root cause applies',
                'Apply any documented solutions from the incident'
            ],
            'expected_outcome': 'Faster resolution using proven solutions',
            'reasoning': 'Similar incident in same category was resolved successfully',
            'matching_factors': [
                f'Category: {resolved_incident.category}',
                f'Resolution time: {time_label}',
                f'Assigned to: {resolved_incident.assigned_to.username if resolved_incident.assigned_to else "Unknown"}'
            ]
        })

    return recommendations
|
||||
|
||||
def _find_experts(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
    """Find experts who have worked on similar incidents.

    Ranks assignees of same-category incidents resolved in the last six
    months by volume (then speed) and returns up to *limit* EXPERT
    recommendations.
    """

    # Find users who have resolved similar incidents
    six_months_ago = timezone.now() - timedelta(days=180)
    expert_incidents = Incident.objects.filter(
        status__in=['RESOLVED', 'CLOSED'],
        category=incident.category,
        assigned_to__isnull=False,
        created_at__gte=six_months_ago
    ).exclude(id=incident.id)

    # Aggregate per-expert stats.
    expert_counts = {}
    for expert_incident in expert_incidents:
        expert = expert_incident.assigned_to
        if expert not in expert_counts:
            expert_counts[expert] = {
                'count': 0,
                'timed_count': 0,  # incidents that actually carry a resolved_at
                'avg_resolution_time': timedelta(),
                'incidents': []
            }
        expert_counts[expert]['count'] += 1
        expert_counts[expert]['incidents'].append(expert_incident)

        if expert_incident.resolved_at:
            resolution_time = expert_incident.resolved_at - expert_incident.created_at
            expert_counts[expert]['avg_resolution_time'] += resolution_time
            expert_counts[expert]['timed_count'] += 1

    # BUG FIX: divide by the number of incidents that contributed a
    # resolution time, not the total incident count — incidents without
    # resolved_at previously dragged the average down.
    for stats in expert_counts.values():
        if stats['timed_count'] > 0:
            stats['avg_resolution_time'] /= stats['timed_count']

    # Most incidents first; ties broken by fastest average resolution.
    sorted_experts = sorted(
        expert_counts.items(),
        key=lambda x: (x[1]['count'], -x[1]['avg_resolution_time'].total_seconds()),
        reverse=True
    )

    recommendations = []
    for expert, stats in sorted_experts[:limit]:
        recommendations.append({
            'recommendation_type': 'EXPERT',
            'title': f'Expert: {expert.username}',
            'description': f'{expert.username} has resolved {stats["count"]} similar incidents with an average resolution time of {stats["avg_resolution_time"]}.',
            'similarity_score': 0.9,  # High similarity for category experts
            'confidence_score': min(0.7 + (stats['count'] * 0.05), 1.0),
            'confidence_level': 'HIGH' if stats['count'] >= 3 else 'MEDIUM',
            'suggested_expert_id': str(expert.id),
            'suggested_actions': [
                f'Contact {expert.username} for assistance',
                'Ask about their experience with similar incidents',
                'Request guidance on resolution approach'
            ],
            'expected_outcome': 'Expert guidance for faster resolution',
            'reasoning': f'Expert has {stats["count"]} successful resolutions in this category',
            'matching_factors': [
                f'Category experience: {stats["count"]} incidents',
                f'Average resolution time: {stats["avg_resolution_time"]}',
                'Success rate: 100% (all incidents resolved)'
            ]
        })

    return recommendations
|
||||
|
||||
def _find_prevention_strategies(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
    """Surface validated prevention/root-cause patterns for the incident's category.

    Patterns are ordered by confidence then frequency; up to *limit* are
    returned as PREVENTION recommendation dicts.
    """

    matching_patterns = LearningPattern.objects.filter(
        is_validated=True,
        pattern_type__in=['PREVENTION', 'ROOT_CAUSE'],
        source_incidents__category=incident.category
    ).distinct().order_by('-confidence_score', '-frequency')

    if not matching_patterns.exists():
        return []

    def as_recommendation(pattern):
        # Translate one learning pattern into the shared recommendation shape.
        return {
            'recommendation_type': 'PREVENTION',
            'title': f'Prevention Strategy: {pattern.name}',
            'description': f'This prevention pattern has been validated and applied {pattern.times_applied} times with a {pattern.success_rate:.1%} success rate.',
            'similarity_score': 0.8,
            'confidence_score': pattern.confidence_score,
            'confidence_level': self._get_confidence_level(pattern.confidence_score),
            'suggested_actions': pattern.actions[:3],  # Top 3 actions
            'expected_outcome': f'Prevent similar incidents using validated pattern (success rate: {pattern.success_rate:.1%})',
            'reasoning': f'Validated pattern with {pattern.frequency} observations and {pattern.success_rate:.1%} success rate',
            'matching_factors': [
                f'Pattern type: {pattern.pattern_type}',
                f'Frequency: {pattern.frequency} observations',
                f'Success rate: {pattern.success_rate:.1%}',
                f'Validation: Expert validated'
            ]
        }

    return [as_recommendation(pattern) for pattern in matching_patterns[:limit]]
|
||||
|
||||
def _calculate_text_similarity(self, text1: str, text2: str) -> float:
|
||||
"""Calculate text similarity using simple keyword matching (fallback)"""
|
||||
try:
|
||||
if not text1.strip() or not text2.strip():
|
||||
return 0.0
|
||||
|
||||
# Simple keyword-based similarity as fallback
|
||||
words1 = set(text1.lower().split())
|
||||
words2 = set(text2.lower().split())
|
||||
|
||||
if not words1 or not words2:
|
||||
return 0.0
|
||||
|
||||
intersection = words1.intersection(words2)
|
||||
union = words1.union(words2)
|
||||
|
||||
return len(intersection) / len(union) if union else 0.0
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to calculate text similarity: {str(e)}")
|
||||
return 0.0
|
||||
|
||||
def _calculate_metadata_similarity(self, incident1: Incident, incident2: Incident) -> float:
    """Score how alike two incidents are based on their metadata fields.

    Weighted sum: category 0.4, subcategory 0.3, severity closeness 0.2,
    priority 0.1; result is capped at 1.0.
    """
    score = 0.0

    # Exact categorical matches (empty/None subcategory never matches).
    if incident1.category == incident2.category:
        score += 0.4
    if incident1.subcategory and incident1.subcategory == incident2.subcategory:
        score += 0.3

    # Severity contributes proportionally to how close the two levels are.
    weights = {'LOW': 1, 'MEDIUM': 2, 'HIGH': 3, 'CRITICAL': 4, 'EMERGENCY': 5}
    w1 = weights.get(incident1.severity)
    w2 = weights.get(incident2.severity)
    if w1 is not None and w2 is not None:
        score += max(0, 1 - (abs(w1 - w2) / 4)) * 0.2

    if incident1.priority == incident2.priority:
        score += 0.1

    return min(score, 1.0)
|
||||
|
||||
def _get_confidence_level(self, score: float) -> str:
|
||||
"""Convert confidence score to confidence level"""
|
||||
if score >= 0.8:
|
||||
return 'VERY_HIGH'
|
||||
elif score >= 0.6:
|
||||
return 'HIGH'
|
||||
elif score >= 0.4:
|
||||
return 'MEDIUM'
|
||||
else:
|
||||
return 'LOW'
|
||||
|
||||
def _save_recommendation(self, incident: Incident, rec_data: Dict[str, Any]) -> IncidentRecommendation:
    """Save recommendation to database.

    Resolves optional related objects (incident, article, expert) from
    their ids in *rec_data* — missing records are silently treated as
    None — then persists an IncidentRecommendation row.
    """

    # Get related objects; a stale/unknown id simply leaves the link unset.
    related_incident = None
    if 'related_incident_id' in rec_data:
        try:
            related_incident = Incident.objects.get(id=rec_data['related_incident_id'])
        except Incident.DoesNotExist:
            pass

    knowledge_article = None
    if 'knowledge_article_id' in rec_data:
        try:
            knowledge_article = KnowledgeBaseArticle.objects.get(id=rec_data['knowledge_article_id'])
        except KnowledgeBaseArticle.DoesNotExist:
            pass

    suggested_expert = None
    if 'suggested_expert_id' in rec_data:
        try:
            # Local import: resolves the project's configured user model lazily.
            from django.contrib.auth import get_user_model
            User = get_user_model()
            suggested_expert = User.objects.get(id=rec_data['suggested_expert_id'])
        except User.DoesNotExist:
            pass

    # Create recommendation. Keys 'recommendation_type', 'title',
    # 'description', scores and 'reasoning' are required in rec_data;
    # the rest default to empty values.
    recommendation = IncidentRecommendation.objects.create(
        incident=incident,
        recommendation_type=rec_data['recommendation_type'],
        title=rec_data['title'],
        description=rec_data['description'],
        similarity_score=rec_data['similarity_score'],
        confidence_level=rec_data['confidence_level'],
        confidence_score=rec_data['confidence_score'],
        related_incident=related_incident,
        knowledge_article=knowledge_article,
        suggested_expert=suggested_expert,
        suggested_actions=rec_data.get('suggested_actions', []),
        expected_outcome=rec_data.get('expected_outcome', ''),
        reasoning=rec_data['reasoning'],
        matching_factors=rec_data.get('matching_factors', []),
        model_version=self.model_version
    )

    return recommendation
|
||||
Reference in New Issue
Block a user