Updates

@@ -0,0 +1,691 @@
# Knowledge & Learning API Documentation

## Overview

The Knowledge & Learning module provides comprehensive functionality for automated postmortem generation, knowledge base management, and intelligent incident recommendations. This module helps organizations learn from incidents and build institutional knowledge to prevent future issues.

## Features

- **Automated Postmortems**: Generate postmortems automatically from incident data
- **Knowledge Base**: Manage and search knowledge articles, runbooks, and troubleshooting guides
- **Recommendation Engine**: Suggest similar incidents, solutions, and experts
- **Learning Patterns**: Identify and track patterns from incident data
- **Usage Analytics**: Track how knowledge is being used and its effectiveness

## API Endpoints

### Postmortems

#### List Postmortems
```
GET /api/knowledge/postmortems/
```

**Query Parameters:**
- `status`: Filter by status (DRAFT, IN_REVIEW, APPROVED, PUBLISHED, ARCHIVED)
- `severity`: Filter by severity (LOW, MEDIUM, HIGH, CRITICAL)
- `is_automated`: Filter by automation status (true/false)
- `owner`: Filter by owner username
- `search`: Search in title, executive_summary, root_cause_analysis
- `ordering`: Order by created_at, updated_at, due_date, severity

**Response:**
```json
{
  "count": 25,
  "next": "http://api.example.com/api/knowledge/postmortems/?page=2",
  "previous": null,
  "results": [
    {
      "id": "uuid",
      "title": "Postmortem: Database Outage",
      "incident": "uuid",
      "incident_title": "Database Connection Timeout",
      "status": "PUBLISHED",
      "severity": "HIGH",
      "owner_username": "john.doe",
      "completion_percentage": 95.0,
      "is_overdue": false,
      "created_at": "2024-01-15T10:30:00Z",
      "due_date": "2024-01-22T10:30:00Z"
    }
  ]
}
```
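
For example, a client can list published, high-severity postmortems with these filters. This is a minimal sketch using Python's `requests` library; the base URL and token are placeholders for your deployment, and the `-created_at` prefix assumes the usual DRF syntax for descending order:

```python
import requests

BASE_URL = "https://api.example.com"           # placeholder: your deployment's host
HEADERS = {"Authorization": "Token <token>"}   # see the Authentication section below

# List published, high-severity postmortems, newest first.
resp = requests.get(
    f"{BASE_URL}/api/knowledge/postmortems/",
    headers=HEADERS,
    params={"status": "PUBLISHED", "severity": "HIGH", "ordering": "-created_at"},
)
resp.raise_for_status()
for pm in resp.json()["results"]:
    print(f"{pm['title']}: {pm['completion_percentage']}% complete")
```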

#### Get Postmortem Details
```
GET /api/knowledge/postmortems/{id}/
```

**Response:**
```json
{
  "id": "uuid",
  "title": "Postmortem: Database Outage",
  "incident": "uuid",
  "incident_title": "Database Connection Timeout",
  "executive_summary": "On January 15, 2024, a high severity incident occurred...",
  "timeline": [
    {
      "timestamp": "2024-01-15T10:30:00Z",
      "event": "Incident reported",
      "description": "Database connection timeout detected",
      "actor": "monitoring.system"
    }
  ],
  "root_cause_analysis": "The root cause was identified as...",
  "impact_assessment": "The incident affected 500 users...",
  "lessons_learned": "Key lessons learned include...",
  "action_items": [
    {
      "title": "Update database connection pool settings",
      "description": "Increase connection pool size to handle peak load",
      "priority": "HIGH",
      "assignee": "database.team",
      "due_date": "2024-01-29T00:00:00Z",
      "category": "Technical Improvement"
    }
  ],
  "is_automated": true,
  "generation_confidence": 0.85,
  "auto_generated_sections": ["executive_summary", "timeline", "root_cause_analysis"],
  "status": "PUBLISHED",
  "severity": "HIGH",
  "owner": "uuid",
  "owner_username": "john.doe",
  "reviewers": ["uuid1", "uuid2"],
  "reviewer_usernames": ["jane.smith", "bob.wilson"],
  "approver": "uuid",
  "approver_username": "alice.johnson",
  "created_at": "2024-01-15T10:30:00Z",
  "updated_at": "2024-01-16T14:20:00Z",
  "published_at": "2024-01-16T14:20:00Z",
  "due_date": "2024-01-22T10:30:00Z",
  "related_incidents": ["uuid1", "uuid2"],
  "affected_services": ["database", "api"],
  "affected_teams": ["database.team", "platform.team"],
  "completion_percentage": 95.0,
  "is_overdue": false
}
```

#### Create Postmortem
```
POST /api/knowledge/postmortems/
```

**Request Body:**
```json
{
  "title": "Postmortem: New Incident",
  "incident": "uuid",
  "executive_summary": "Executive summary...",
  "severity": "HIGH",
  "owner": "uuid",
  "due_date": "2024-01-29T00:00:00Z"
}
```

#### Generate Automated Postmortem
```
POST /api/knowledge/postmortems/{id}/generate_automated/
```

**Response:**
```json
{
  "message": "Postmortem generated successfully",
  "confidence_score": 0.85
}
```
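
Because generation can produce sections that still need human review, it is worth checking the returned confidence before publishing. A minimal sketch reusing `BASE_URL` and `HEADERS` from the listing example; `postmortem_id` is a placeholder, and the 0.7 threshold is an example policy, not part of the API:

```python
resp = requests.post(
    f"{BASE_URL}/api/knowledge/postmortems/{postmortem_id}/generate_automated/",
    headers=HEADERS,
)
resp.raise_for_status()
result = resp.json()
if result["confidence_score"] < 0.7:
    # Example policy: route low-confidence drafts to a human reviewer.
    print("Low confidence - review the auto-generated sections before publishing")
```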

#### Approve Postmortem
```
POST /api/knowledge/postmortems/{id}/approve/
```

#### Publish Postmortem
```
POST /api/knowledge/postmortems/{id}/publish/
```

#### Get Overdue Postmortems
```
GET /api/knowledge/postmortems/overdue/
```

#### Get Postmortem Statistics
```
GET /api/knowledge/postmortems/statistics/
```

**Response:**
```json
{
  "total_postmortems": 150,
  "by_status": {
    "DRAFT": 25,
    "IN_REVIEW": 15,
    "APPROVED": 20,
    "PUBLISHED": 85,
    "ARCHIVED": 5
  },
  "by_severity": {
    "LOW": 10,
    "MEDIUM": 45,
    "HIGH": 70,
    "CRITICAL": 25
  },
  "automated_percentage": 75.5,
  "overdue_count": 8,
  "avg_completion_time": "5 days, 12:30:00"
}
```

### Knowledge Base Articles

#### List Knowledge Base Articles
```
GET /api/knowledge/knowledge-articles/
```

**Query Parameters:**
- `article_type`: Filter by type (RUNBOOK, TROUBLESHOOTING, BEST_PRACTICE, etc.)
- `category`: Filter by category
- `subcategory`: Filter by subcategory
- `status`: Filter by status (DRAFT, REVIEW, APPROVED, PUBLISHED, DEPRECATED)
- `is_featured`: Filter featured articles (true/false)
- `difficulty_level`: Filter by difficulty (BEGINNER, INTERMEDIATE, ADVANCED, EXPERT)
- `search`: Search in title, content, summary, tags, search_keywords
- `ordering`: Order by created_at, updated_at, view_count, title

#### Get Knowledge Base Article
```
GET /api/knowledge/knowledge-articles/{slug}/
```

**Note:** This endpoint automatically increments the view count.

#### Create Knowledge Base Article
```
POST /api/knowledge/knowledge-articles/
```

**Request Body:**
```json
{
  "title": "Database Troubleshooting Guide",
  "content": "This guide covers common database issues...",
  "summary": "Comprehensive guide for database troubleshooting",
  "article_type": "TROUBLESHOOTING",
  "category": "Database",
  "subcategory": "Performance",
  "tags": ["database", "troubleshooting", "performance"],
  "difficulty_level": "INTERMEDIATE",
  "related_services": ["database", "api"],
  "related_components": ["postgresql", "connection-pool"]
}
```
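
Creating an article is a plain authenticated POST of the body above; a sketch, again reusing `BASE_URL` and `HEADERS` (that the response echoes the article's `slug` is an assumption, not confirmed by this document):

```python
article = {
    "title": "Database Troubleshooting Guide",
    "content": "This guide covers common database issues...",
    "summary": "Comprehensive guide for database troubleshooting",
    "article_type": "TROUBLESHOOTING",
    "category": "Database",
    "tags": ["database", "troubleshooting", "performance"],
    "difficulty_level": "INTERMEDIATE",
}
resp = requests.post(
    f"{BASE_URL}/api/knowledge/knowledge-articles/",
    headers=HEADERS,
    json=article,
)
resp.raise_for_status()
print("Created:", resp.json().get("slug"))  # assumption: response includes the slug
```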

#### Rate Knowledge Base Article
```
POST /api/knowledge/knowledge-articles/{slug}/rate/
```

**Request Body:**
```json
{
  "rating": 4,
  "feedback": "Very helpful guide, saved me hours of debugging"
}
```

#### Bookmark Knowledge Base Article
```
POST /api/knowledge/knowledge-articles/{slug}/bookmark/
```

#### Search Knowledge Base
```
POST /api/knowledge/knowledge-articles/search/
```

**Request Body:**
```json
{
  "query": "database connection timeout",
  "article_types": ["RUNBOOK", "TROUBLESHOOTING"],
  "categories": ["Database"],
  "difficulty_levels": ["INTERMEDIATE", "ADVANCED"],
  "limit": 20,
  "offset": 0
}
```

**Response:**
```json
{
  "results": [
    {
      "id": "uuid",
      "title": "Database Connection Troubleshooting",
      "slug": "database-connection-troubleshooting",
      "summary": "Guide for resolving database connection issues",
      "article_type": "TROUBLESHOOTING",
      "category": "Database",
      "similarity_score": 0.85,
      "relevance_score": 0.92,
      "popularity_score": 0.75,
      "matching_keywords": ["database", "connection", "timeout"]
    }
  ],
  "total_count": 15,
  "query": "database connection timeout",
  "filters": {
    "article_types": ["RUNBOOK", "TROUBLESHOOTING"],
    "categories": ["Database"],
    "difficulty_levels": ["INTERMEDIATE", "ADVANCED"]
  },
  "pagination": {
    "limit": 20,
    "offset": 0,
    "has_more": false
  }
}
```
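
A search client can page through results by advancing `offset` until `has_more` is false; a sketch:

```python
def search_articles(query, page_size=20):
    """Yield every matching article, following the has_more flag."""
    offset = 0
    while True:
        resp = requests.post(
            f"{BASE_URL}/api/knowledge/knowledge-articles/search/",
            headers=HEADERS,
            json={"query": query, "limit": page_size, "offset": offset},
        )
        resp.raise_for_status()
        body = resp.json()
        yield from body["results"]
        if not body["pagination"]["has_more"]:
            break
        offset += page_size


for hit in search_articles("database connection timeout"):
    print(hit["title"], hit["relevance_score"])
```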

#### Get Articles Due for Review
```
GET /api/knowledge/knowledge-articles/due_for_review/
```

#### Get Popular Articles
```
GET /api/knowledge/knowledge-articles/popular/
```

#### Get Knowledge Base Statistics
```
GET /api/knowledge/knowledge-articles/statistics/
```

### Incident Recommendations

#### List Recommendations
```
GET /api/knowledge/recommendations/
```

**Query Parameters:**
- `recommendation_type`: Filter by type (SIMILAR_INCIDENT, SOLUTION, KNOWLEDGE_ARTICLE, etc.)
- `confidence_level`: Filter by confidence (LOW, MEDIUM, HIGH, VERY_HIGH)
- `is_applied`: Filter by application status (true/false)
- `incident`: Filter by incident ID
- `search`: Search in title, description, reasoning
- `ordering`: Order by created_at, confidence_score, similarity_score

#### Get Recommendation Details
```
GET /api/knowledge/recommendations/{id}/
```

#### Apply Recommendation
```
POST /api/knowledge/recommendations/{id}/apply/
```

#### Rate Recommendation Effectiveness
```
POST /api/knowledge/recommendations/{id}/rate_effectiveness/
```

**Request Body:**
```json
{
  "rating": 4
}
```

#### Generate Recommendations for Incident
```
POST /api/knowledge/recommendations/generate_for_incident/
```

**Request Body:**
```json
{
  "incident_id": "uuid",
  "recommendation_types": ["SIMILAR_INCIDENT", "KNOWLEDGE_ARTICLE", "SOLUTION"],
  "max_recommendations": 5,
  "min_confidence": 0.6
}
```

**Response:**
```json
{
  "message": "Recommendations generated successfully",
  "recommendations": [
    {
      "id": "uuid",
      "title": "Similar Incident: Database Timeout Issue",
      "type": "SIMILAR_INCIDENT",
      "confidence_score": 0.85,
      "similarity_score": 0.78
    }
  ]
}
```
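
A typical flow generates recommendations for a new incident and applies the strongest one. A sketch (`incident_id` is a placeholder; picking the highest `confidence_score` is an example policy):

```python
resp = requests.post(
    f"{BASE_URL}/api/knowledge/recommendations/generate_for_incident/",
    headers=HEADERS,
    json={
        "incident_id": incident_id,  # placeholder: the incident's UUID
        "recommendation_types": ["SIMILAR_INCIDENT", "KNOWLEDGE_ARTICLE"],
        "max_recommendations": 5,
        "min_confidence": 0.6,
    },
)
resp.raise_for_status()
recs = resp.json()["recommendations"]
if recs:
    best = max(recs, key=lambda r: r["confidence_score"])
    requests.post(
        f"{BASE_URL}/api/knowledge/recommendations/{best['id']}/apply/",
        headers=HEADERS,
    ).raise_for_status()
```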

#### Get Recommendation Statistics
```
GET /api/knowledge/recommendations/statistics/
```

### Learning Patterns

#### List Learning Patterns
```
GET /api/knowledge/learning-patterns/
```

**Query Parameters:**
- `pattern_type`: Filter by type (ROOT_CAUSE, RESOLUTION, PREVENTION, etc.)
- `is_validated`: Filter by validation status (true/false)
- `search`: Search in name, description, triggers, actions
- `ordering`: Order by created_at, confidence_score, frequency, success_rate

#### Get Learning Pattern Details
```
GET /api/knowledge/learning-patterns/{id}/
```

#### Validate Learning Pattern
```
POST /api/knowledge/learning-patterns/{id}/validate/
```

**Request Body:**
```json
{
  "validation_notes": "This pattern has been validated by the expert team"
}
```

#### Apply Learning Pattern
```
POST /api/knowledge/learning-patterns/{id}/apply/
```

#### Get Learning Pattern Statistics
```
GET /api/knowledge/learning-patterns/statistics/
```

### Automated Postmortem Generation

#### List Generation Logs
```
GET /api/knowledge/postmortem-generations/
```

**Query Parameters:**
- `status`: Filter by status (PENDING, PROCESSING, COMPLETED, FAILED, REVIEW_REQUIRED)
- `incident`: Filter by incident ID
- `generation_trigger`: Filter by trigger type
- `ordering`: Order by started_at, completed_at, processing_time

#### Get Generation Log Details
```
GET /api/knowledge/postmortem-generations/{id}/
```

#### Generate Postmortem for Incident
```
POST /api/knowledge/postmortem-generations/generate_postmortem/
```

**Request Body:**
```json
{
  "incident_id": "uuid",
  "include_timeline": true,
  "include_logs": true,
  "generation_trigger": "manual"
}
```

**Response:**
```json
{
  "message": "Postmortem generation initiated",
  "generation_id": "uuid"
}
```
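
Generation runs asynchronously: the response only hands back a `generation_id`. A client can poll the generation-log detail endpoint until the status leaves PENDING/PROCESSING; a sketch (this assumes the log detail payload exposes the same `status` field used for filtering above):

```python
import time

resp = requests.post(
    f"{BASE_URL}/api/knowledge/postmortem-generations/generate_postmortem/",
    headers=HEADERS,
    json={
        "incident_id": incident_id,  # placeholder: the incident's UUID
        "include_timeline": True,
        "include_logs": True,
        "generation_trigger": "manual",
    },
)
resp.raise_for_status()
gen_id = resp.json()["generation_id"]

while True:
    log = requests.get(
        f"{BASE_URL}/api/knowledge/postmortem-generations/{gen_id}/",
        headers=HEADERS,
    ).json()
    if log["status"] not in ("PENDING", "PROCESSING"):
        break
    time.sleep(5)  # simple fixed-interval poll

print("Generation finished with status:", log["status"])
```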

## Data Models

### Postmortem
- **id**: UUID (Primary Key)
- **title**: String (200 chars)
- **incident**: Foreign Key to Incident
- **executive_summary**: Text
- **timeline**: JSON Array
- **root_cause_analysis**: Text
- **impact_assessment**: Text
- **lessons_learned**: Text
- **action_items**: JSON Array
- **is_automated**: Boolean
- **generation_confidence**: Float (0.0-1.0)
- **auto_generated_sections**: JSON Array
- **status**: Choice (DRAFT, IN_REVIEW, APPROVED, PUBLISHED, ARCHIVED)
- **severity**: Choice (LOW, MEDIUM, HIGH, CRITICAL)
- **owner**: Foreign Key to User
- **reviewers**: Many-to-Many to User
- **approver**: Foreign Key to User
- **created_at**: DateTime
- **updated_at**: DateTime
- **published_at**: DateTime
- **due_date**: DateTime
- **related_incidents**: Many-to-Many to Incident
- **affected_services**: JSON Array
- **affected_teams**: JSON Array

### KnowledgeBaseArticle
- **id**: UUID (Primary Key)
- **title**: String (200 chars)
- **slug**: Slug (Unique)
- **content**: Text
- **summary**: Text
- **tags**: JSON Array
- **article_type**: Choice (RUNBOOK, TROUBLESHOOTING, BEST_PRACTICE, etc.)
- **category**: String (100 chars)
- **subcategory**: String (100 chars)
- **related_services**: JSON Array
- **related_components**: JSON Array
- **status**: Choice (DRAFT, REVIEW, APPROVED, PUBLISHED, DEPRECATED)
- **is_featured**: Boolean
- **view_count**: Positive Integer
- **author**: Foreign Key to User
- **last_updated_by**: Foreign Key to User
- **maintainer**: Foreign Key to User
- **created_at**: DateTime
- **updated_at**: DateTime
- **last_reviewed**: DateTime
- **next_review_due**: DateTime
- **related_incidents**: Many-to-Many to Incident
- **source_postmortems**: Many-to-Many to Postmortem
- **confluence_url**: URL
- **wiki_url**: URL
- **external_references**: JSON Array
- **search_keywords**: JSON Array
- **difficulty_level**: Choice (BEGINNER, INTERMEDIATE, ADVANCED, EXPERT)

### IncidentRecommendation
- **id**: UUID (Primary Key)
- **incident**: Foreign Key to Incident
- **recommendation_type**: Choice (SIMILAR_INCIDENT, SOLUTION, KNOWLEDGE_ARTICLE, etc.)
- **title**: String (200 chars)
- **description**: Text
- **similarity_score**: Float (0.0-1.0)
- **confidence_level**: Choice (LOW, MEDIUM, HIGH, VERY_HIGH)
- **confidence_score**: Float (0.0-1.0)
- **related_incident**: Foreign Key to Incident
- **knowledge_article**: Foreign Key to KnowledgeBaseArticle
- **suggested_expert**: Foreign Key to User
- **suggested_actions**: JSON Array
- **expected_outcome**: Text
- **time_to_implement**: Duration
- **is_applied**: Boolean
- **applied_at**: DateTime
- **applied_by**: Foreign Key to User
- **effectiveness_rating**: Positive Integer (1-5)
- **reasoning**: Text
- **matching_factors**: JSON Array
- **model_version**: String (50 chars)
- **created_at**: DateTime
- **updated_at**: DateTime

### LearningPattern
- **id**: UUID (Primary Key)
- **name**: String (200 chars)
- **pattern_type**: Choice (ROOT_CAUSE, RESOLUTION, PREVENTION, etc.)
- **description**: Text
- **frequency**: Positive Integer
- **success_rate**: Float (0.0-1.0)
- **confidence_score**: Float (0.0-1.0)
- **triggers**: JSON Array
- **actions**: JSON Array
- **outcomes**: JSON Array
- **source_incidents**: Many-to-Many to Incident
- **source_postmortems**: Many-to-Many to Postmortem
- **is_validated**: Boolean
- **validated_by**: Foreign Key to User
- **validation_notes**: Text
- **times_applied**: Positive Integer
- **last_applied**: DateTime
- **created_at**: DateTime
- **updated_at**: DateTime

## Management Commands

### Generate Postmortems
```bash
python manage.py generate_postmortems --days 7 --severity HIGH --force
```

**Options:**
- `--days`: Number of days back to look for resolved incidents (default: 7)
- `--severity`: Only generate for specific severity levels
- `--force`: Force generation even if postmortem exists
- `--dry-run`: Show what would be generated without creating

### Generate Recommendations
```bash
python manage.py generate_recommendations --days 1 --status OPEN --max-recommendations 5
```

**Options:**
- `--days`: Number of days back to look for incidents (default: 1)
- `--status`: Only generate for specific incident status (default: OPEN)
- `--severity`: Only generate for specific severity levels
- `--force`: Force generation even if recommendations exist
- `--max-recommendations`: Maximum recommendations per incident (default: 5)
- `--dry-run`: Show what would be generated without creating

### Update Learning Patterns
```bash
python manage.py update_learning_patterns --days 30 --min-frequency 3
```

**Options:**
- `--days`: Number of days back to analyze (default: 30)
- `--min-frequency`: Minimum frequency to create pattern (default: 3)
- `--dry-run`: Show what patterns would be created/updated
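
These commands are intended for periodic execution. Besides cron or a task runner, they can be invoked from Python through Django's `call_command`, where dashed option names become underscored keyword arguments; a sketch:

```python
from django.core.management import call_command

# Equivalent to the CLI invocations above; --dry-run becomes dry_run, etc.
call_command("generate_postmortems", days=7, severity="HIGH", dry_run=True)
call_command("generate_recommendations", days=1, status="OPEN", max_recommendations=5)
call_command("update_learning_patterns", days=30, min_frequency=3)
```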

## Integration Points

### Incident Intelligence Module
- **Incident Model**: Primary relationship for postmortems and recommendations
- **Incident Resolution**: Triggers automatic postmortem generation
- **Incident Classification**: Used for similarity matching in recommendations

### Analytics & Predictive Insights Module
- **KPI Calculations**: Postmortem completion rates, knowledge base usage
- **Pattern Detection**: Integration with learning patterns for trend analysis
- **Predictive Models**: Use learning patterns for incident prediction

### Automation & Orchestration Module
- **Runbook Integration**: Knowledge base articles can be linked to runbooks
- **Automated Actions**: Postmortem action items can trigger automation workflows

### Security Module
- **Access Control**: Knowledge base articles respect data classification levels
- **Audit Logging**: All knowledge base usage is tracked for compliance

## Best Practices

### Postmortem Management
1. **Automated Generation**: Enable automatic postmortem generation for high-severity incidents
2. **Review Process**: Implement a structured review and approval workflow
3. **Action Item Tracking**: Ensure action items are assigned and tracked to completion
4. **Timeline Accuracy**: Verify and enhance auto-generated timelines with human input

### Knowledge Base Management
1. **Content Quality**: Regularly review and update knowledge base articles
2. **Search Optimization**: Use relevant tags and keywords for better discoverability
3. **User Feedback**: Collect and act on user ratings and feedback
4. **Review Schedule**: Set up regular review cycles for knowledge base articles

### Recommendation Engine
1. **Confidence Thresholds**: Set appropriate confidence thresholds for different use cases
2. **Feedback Loop**: Collect effectiveness ratings to improve recommendation quality
3. **Pattern Validation**: Regularly validate learning patterns with subject matter experts
4. **Continuous Learning**: Update models based on new incident data and outcomes

### Learning Patterns
1. **Pattern Validation**: Have experts validate patterns before they're used for recommendations
2. **Success Tracking**: Monitor the success rate of applied patterns
3. **Pattern Evolution**: Update patterns as new data becomes available
4. **Knowledge Sharing**: Share validated patterns across teams and organizations

## Error Handling

The API returns appropriate HTTP status codes and error messages:

- **400 Bad Request**: Invalid request data or parameters
- **401 Unauthorized**: Authentication required
- **403 Forbidden**: Insufficient permissions
- **404 Not Found**: Resource not found
- **500 Internal Server Error**: Server-side error

**Error Response Format:**
```json
{
  "error": "Error message describing what went wrong",
  "details": "Additional error details if available",
  "code": "ERROR_CODE"
}
```
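
Given that format, a small client helper can surface the structured fields instead of a bare status code; a sketch:

```python
def api_request(method, path, **kwargs):
    """Make an API call and raise a readable error using the documented format."""
    resp = requests.request(method, f"{BASE_URL}{path}", headers=HEADERS, **kwargs)
    if resp.status_code >= 400:
        try:
            err = resp.json()
            detail = f"{err.get('code')}: {err.get('error')} ({err.get('details')})"
        except ValueError:  # body was not JSON
            detail = resp.text
        raise RuntimeError(f"API error {resp.status_code} - {detail}")
    return resp.json()
```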

## Rate Limiting

API endpoints are rate-limited to prevent abuse (a retry sketch follows the list):
- **Read Operations**: 1000 requests per hour per user
- **Write Operations**: 100 requests per hour per user
- **Search Operations**: 500 requests per hour per user
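
When a limit is exceeded, retrying with backoff is the usual remedy. The sketch below assumes the server signals throttling with HTTP 429 and an optional `Retry-After` header (the Django REST framework defaults); the document does not state this, so treat it as an assumption:

```python
import time

def get_with_backoff(url, max_attempts=5, **kwargs):
    """GET with exponential backoff on throttling (assumes HTTP 429)."""
    for attempt in range(max_attempts):
        resp = requests.get(url, headers=HEADERS, **kwargs)
        if resp.status_code != 429:  # assumption: 429 signals rate limiting
            return resp
        # Honor Retry-After when present, otherwise back off exponentially.
        time.sleep(float(resp.headers.get("Retry-After", 2 ** attempt)))
    raise RuntimeError("rate limit retries exhausted")
```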

## Authentication

All API endpoints require authentication using one of the following methods (token usage is sketched below):
- **Token Authentication**: Include `Authorization: Token <token>` header
- **Session Authentication**: Use Django session authentication
- **SSO Authentication**: Use configured SSO providers
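
For token authentication, attaching the header once to a session keeps every call consistent; a minimal sketch (`<token>` is a placeholder):

```python
session = requests.Session()
session.headers.update({"Authorization": "Token <token>"})

resp = session.get(f"{BASE_URL}/api/knowledge/postmortems/")
resp.raise_for_status()
```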

## Permissions

- **Read Access**: All authenticated users can read published knowledge base articles
- **Write Access**: Users need appropriate permissions to create/edit postmortems and articles
- **Admin Access**: Only admin users can manage learning patterns and system settings
- **Data Classification**: Access to sensitive content is controlled by data classification levels
0    ETB-API/knowledge_learning/__init__.py (new file)
BIN  ETB-API/knowledge_learning/__pycache__/__init__.cpython-312.pyc (new binary file, not shown)
BIN  ETB-API/knowledge_learning/__pycache__/admin.cpython-312.pyc (new binary file, not shown)
BIN  ETB-API/knowledge_learning/__pycache__/apps.cpython-312.pyc (new binary file, not shown)
BIN  ETB-API/knowledge_learning/__pycache__/models.cpython-312.pyc (new binary file, not shown)
BIN  ETB-API/knowledge_learning/__pycache__/signals.cpython-312.pyc (new binary file, not shown)
BIN  ETB-API/knowledge_learning/__pycache__/urls.cpython-312.pyc (new binary file, not shown)

261  ETB-API/knowledge_learning/admin.py (new file)
@@ -0,0 +1,261 @@
from django.contrib import admin
from django.utils.html import format_html
from django.urls import reverse
from django.utils.safestring import mark_safe

from .models import (
    Postmortem, KnowledgeBaseArticle, IncidentRecommendation,
    LearningPattern, KnowledgeBaseUsage, AutomatedPostmortemGeneration
)


@admin.register(Postmortem)
class PostmortemAdmin(admin.ModelAdmin):
    list_display = [
        'title', 'incident_link', 'status', 'severity', 'owner',
        'completion_percentage', 'is_overdue', 'created_at'
    ]
    list_filter = ['status', 'severity', 'is_automated', 'created_at']
    search_fields = ['title', 'incident__title', 'owner__username']
    readonly_fields = ['id', 'created_at', 'updated_at', 'completion_percentage', 'is_overdue']
    fieldsets = (
        ('Basic Information', {
            'fields': ('id', 'title', 'incident', 'status', 'severity')
        }),
        ('Content', {
            'fields': ('executive_summary', 'timeline', 'root_cause_analysis',
                       'impact_assessment', 'lessons_learned', 'action_items')
        }),
        ('Automation', {
            'fields': ('is_automated', 'generation_confidence', 'auto_generated_sections')
        }),
        ('Workflow', {
            'fields': ('owner', 'reviewers', 'approver', 'due_date')
        }),
        ('Context', {
            'fields': ('related_incidents', 'affected_services', 'affected_teams')
        }),
        ('Timestamps', {
            'fields': ('created_at', 'updated_at', 'published_at'),
            'classes': ('collapse',)
        })
    )
    filter_horizontal = ['reviewers', 'related_incidents']

    def incident_link(self, obj):
        if obj.incident:
            url = reverse('admin:incident_intelligence_incident_change', args=[obj.incident.id])
            return format_html('<a href="{}">{}</a>', url, obj.incident.title)
        return '-'
    incident_link.short_description = 'Incident'

    def completion_percentage(self, obj):
        percentage = obj.get_completion_percentage()
        color = 'green' if percentage >= 80 else 'orange' if percentage >= 50 else 'red'
        return format_html(
            '<span style="color: {};">{:.1f}%</span>',
            color, percentage
        )
    completion_percentage.short_description = 'Completion'

    def is_overdue(self, obj):
        if obj.is_overdue:
            return format_html('<span style="color: red;">Yes</span>')
        return format_html('<span style="color: green;">No</span>')
    is_overdue.short_description = 'Overdue'


@admin.register(KnowledgeBaseArticle)
class KnowledgeBaseArticleAdmin(admin.ModelAdmin):
    list_display = [
        'title', 'article_type', 'category', 'status', 'view_count',
        'author', 'is_due_for_review', 'created_at'
    ]
    list_filter = ['article_type', 'category', 'status', 'difficulty_level', 'is_featured', 'created_at']
    search_fields = ['title', 'content', 'summary', 'tags', 'author__username']
    readonly_fields = ['id', 'created_at', 'updated_at', 'view_count', 'is_due_for_review']
    fieldsets = (
        ('Basic Information', {
            'fields': ('id', 'title', 'slug', 'article_type', 'category', 'subcategory')
        }),
        ('Content', {
            'fields': ('content', 'summary', 'tags', 'search_keywords')
        }),
        ('Classification', {
            'fields': ('related_services', 'related_components', 'difficulty_level')
        }),
        ('Status & Workflow', {
            'fields': ('status', 'is_featured', 'author', 'last_updated_by', 'maintainer')
        }),
        ('Review Schedule', {
            'fields': ('last_reviewed', 'next_review_due', 'is_due_for_review')
        }),
        ('External Links', {
            'fields': ('confluence_url', 'wiki_url', 'external_references'),
            'classes': ('collapse',)
        }),
        ('Relationships', {
            'fields': ('related_incidents', 'source_postmortems'),
            'classes': ('collapse',)
        }),
        ('Statistics', {
            'fields': ('view_count', 'created_at', 'updated_at'),
            'classes': ('collapse',)
        })
    )
    filter_horizontal = ['related_incidents', 'source_postmortems']

    def is_due_for_review(self, obj):
        if obj.is_due_for_review():
            return format_html('<span style="color: red;">Yes</span>')
        return format_html('<span style="color: green;">No</span>')
    is_due_for_review.short_description = 'Due for Review'


@admin.register(IncidentRecommendation)
class IncidentRecommendationAdmin(admin.ModelAdmin):
    list_display = [
        'title', 'incident_link', 'recommendation_type', 'confidence_level',
        'similarity_score', 'is_applied', 'created_at'
    ]
    list_filter = ['recommendation_type', 'confidence_level', 'is_applied', 'created_at']
    search_fields = ['title', 'description', 'incident__title', 'reasoning']
    readonly_fields = ['id', 'created_at', 'updated_at']
    fieldsets = (
        ('Basic Information', {
            'fields': ('id', 'incident', 'recommendation_type', 'title', 'description')
        }),
        ('Scores', {
            'fields': ('similarity_score', 'confidence_level', 'confidence_score')
        }),
        ('Related Objects', {
            'fields': ('related_incident', 'knowledge_article', 'suggested_expert')
        }),
        ('Recommendation Details', {
            'fields': ('suggested_actions', 'expected_outcome', 'time_to_implement')
        }),
        ('Usage Tracking', {
            'fields': ('is_applied', 'applied_at', 'applied_by', 'effectiveness_rating')
        }),
        ('AI Analysis', {
            'fields': ('reasoning', 'matching_factors', 'model_version'),
            'classes': ('collapse',)
        }),
        ('Timestamps', {
            'fields': ('created_at', 'updated_at'),
            'classes': ('collapse',)
        })
    )

    def incident_link(self, obj):
        if obj.incident:
            url = reverse('admin:incident_intelligence_incident_change', args=[obj.incident.id])
            return format_html('<a href="{}">{}</a>', url, obj.incident.title)
        return '-'
    incident_link.short_description = 'Incident'


@admin.register(LearningPattern)
class LearningPatternAdmin(admin.ModelAdmin):
    list_display = [
        'name', 'pattern_type', 'frequency', 'success_rate',
        'confidence_score', 'is_validated', 'times_applied'
    ]
    list_filter = ['pattern_type', 'is_validated', 'created_at']
    search_fields = ['name', 'description', 'validated_by__username']
    readonly_fields = ['id', 'created_at', 'updated_at']
    fieldsets = (
        ('Basic Information', {
            'fields': ('id', 'name', 'pattern_type', 'description')
        }),
        ('Pattern Characteristics', {
            'fields': ('frequency', 'success_rate', 'confidence_score')
        }),
        ('Pattern Details', {
            'fields': ('triggers', 'actions', 'outcomes')
        }),
        ('Source Data', {
            'fields': ('source_incidents', 'source_postmortems'),
            'classes': ('collapse',)
        }),
        ('Validation', {
            'fields': ('is_validated', 'validated_by', 'validation_notes')
        }),
        ('Usage Tracking', {
            'fields': ('times_applied', 'last_applied')
        }),
        ('Timestamps', {
            'fields': ('created_at', 'updated_at'),
            'classes': ('collapse',)
        })
    )
    filter_horizontal = ['source_incidents', 'source_postmortems']


@admin.register(KnowledgeBaseUsage)
class KnowledgeBaseUsageAdmin(admin.ModelAdmin):
    list_display = [
        'user', 'usage_type', 'article_link', 'recommendation_link',
        'incident_link', 'created_at'
    ]
    list_filter = ['usage_type', 'created_at']
    search_fields = ['user__username', 'knowledge_article__title', 'recommendation__title']
    readonly_fields = ['id', 'created_at']

    def article_link(self, obj):
        if obj.knowledge_article:
            url = reverse('admin:knowledge_learning_knowledgebasearticle_change', args=[obj.knowledge_article.id])
            return format_html('<a href="{}">{}</a>', url, obj.knowledge_article.title)
        return '-'
    article_link.short_description = 'Article'

    def recommendation_link(self, obj):
        if obj.recommendation:
            url = reverse('admin:knowledge_learning_incidentrecommendation_change', args=[obj.recommendation.id])
            return format_html('<a href="{}">{}</a>', url, obj.recommendation.title)
        return '-'
    recommendation_link.short_description = 'Recommendation'

    def incident_link(self, obj):
        if obj.incident:
            url = reverse('admin:incident_intelligence_incident_change', args=[obj.incident.id])
            return format_html('<a href="{}">{}</a>', url, obj.incident.title)
        return '-'
    incident_link.short_description = 'Incident'


@admin.register(AutomatedPostmortemGeneration)
class AutomatedPostmortemGenerationAdmin(admin.ModelAdmin):
    list_display = [
        'incident_link', 'status', 'generation_trigger', 'processing_time',
        'model_version', 'started_at'
    ]
    list_filter = ['status', 'generation_trigger', 'model_version', 'started_at']
    search_fields = ['incident__title', 'error_message']
    readonly_fields = ['id', 'started_at', 'completed_at']
    fieldsets = (
        ('Basic Information', {
            'fields': ('id', 'incident', 'status', 'generation_trigger')
        }),
        ('Input Data', {
            'fields': ('incident_data', 'timeline_data', 'log_data'),
            'classes': ('collapse',)
        }),
        ('Generation Results', {
            'fields': ('generated_content', 'confidence_scores', 'quality_metrics', 'generated_postmortem'),
            'classes': ('collapse',)
        }),
        ('Processing Details', {
            'fields': ('processing_time', 'model_version', 'error_message')
        }),
        ('Timestamps', {
            'fields': ('started_at', 'completed_at')
        })
    )

    def incident_link(self, obj):
        if obj.incident:
            url = reverse('admin:incident_intelligence_incident_change', args=[obj.incident.id])
            return format_html('<a href="{}">{}</a>', url, obj.incident.title)
        return '-'
    incident_link.short_description = 'Incident'
11   ETB-API/knowledge_learning/apps.py (new file)
@@ -0,0 +1,11 @@
from django.apps import AppConfig


class KnowledgeLearningConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'knowledge_learning'
    verbose_name = 'Knowledge & Learning'

    def ready(self):
        """Import signals when the app is ready"""
        import knowledge_learning.signals
1    ETB-API/knowledge_learning/management/__init__.py (new file)
@@ -0,0 +1 @@
# Knowledge Learning Management Commands

Binary file not shown.

@@ -0,0 +1 @@
# Management Commands
@@ -0,0 +1,121 @@
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from datetime import timedelta
from incident_intelligence.models import Incident
from knowledge_learning.services.postmortem_generator import PostmortemGenerator
from knowledge_learning.models import Postmortem


class Command(BaseCommand):
    help = 'Generate automated postmortems for resolved incidents'

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            default=7,
            help='Number of days back to look for resolved incidents (default: 7)'
        )
        parser.add_argument(
            '--severity',
            type=str,
            choices=['LOW', 'MEDIUM', 'HIGH', 'CRITICAL', 'EMERGENCY'],
            help='Only generate postmortems for incidents with specific severity'
        )
        parser.add_argument(
            '--force',
            action='store_true',
            help='Force generation even if postmortem already exists'
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be generated without actually creating postmortems'
        )

    def handle(self, *args, **options):
        days = options['days']
        severity = options['severity']
        force = options['force']
        dry_run = options['dry_run']

        # Calculate date range
        end_date = timezone.now()
        start_date = end_date - timedelta(days=days)

        self.stdout.write(
            self.style.SUCCESS(f'Looking for resolved incidents from {start_date.date()} to {end_date.date()}')
        )

        # Build queryset for resolved incidents
        queryset = Incident.objects.filter(
            status__in=['RESOLVED', 'CLOSED'],
            resolved_at__gte=start_date,
            resolved_at__lte=end_date
        )

        if severity:
            queryset = queryset.filter(severity=severity)

        # Filter out incidents that already have postmortems (unless force is used)
        if not force:
            existing_postmortem_incidents = Postmortem.objects.values_list('incident_id', flat=True)
            queryset = queryset.exclude(id__in=existing_postmortem_incidents)

        incidents = list(queryset)

        if not incidents:
            self.stdout.write(
                self.style.WARNING('No resolved incidents found matching criteria')
            )
            return

        self.stdout.write(
            self.style.SUCCESS(f'Found {len(incidents)} incidents to process')
        )

        if dry_run:
            self.stdout.write(self.style.WARNING('DRY RUN - No postmortems will be created'))
            for incident in incidents:
                self.stdout.write(f'  - {incident.title} ({incident.severity}) - {incident.resolved_at}')
            return

        # Initialize postmortem generator
        generator = PostmortemGenerator()

        success_count = 0
        error_count = 0

        for incident in incidents:
            try:
                self.stdout.write(f'Generating postmortem for: {incident.title}')

                result = generator.generate_postmortem_for_incident(
                    incident_id=str(incident.id),
                    include_timeline=True,
                    include_logs=True,
                    trigger='management_command'
                )

                self.stdout.write(
                    self.style.SUCCESS(f'  ✓ Generated postmortem {result["postmortem_id"]}')
                )
                success_count += 1

            except Exception as e:
                self.stdout.write(
                    self.style.ERROR(f'  ✗ Failed to generate postmortem: {str(e)}')
                )
                error_count += 1

        # Summary
        self.stdout.write('\n' + '='*50)
        self.stdout.write(
            self.style.SUCCESS('Postmortem generation completed:')
        )
        self.stdout.write(f'  Successfully generated: {success_count}')
        if error_count > 0:
            self.stdout.write(
                self.style.ERROR(f'  Failed: {error_count}')
            )
        self.stdout.write('='*50)
@@ -0,0 +1,138 @@
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from datetime import timedelta
from incident_intelligence.models import Incident
from knowledge_learning.services.recommendation_engine import RecommendationEngine
from knowledge_learning.models import IncidentRecommendation


class Command(BaseCommand):
    help = 'Generate recommendations for incidents'

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            default=1,
            help='Number of days back to look for incidents (default: 1)'
        )
        parser.add_argument(
            '--status',
            type=str,
            choices=['OPEN', 'IN_PROGRESS', 'RESOLVED', 'CLOSED'],
            default='OPEN',
            help='Only generate recommendations for incidents with specific status (default: OPEN)'
        )
        parser.add_argument(
            '--severity',
            type=str,
            choices=['LOW', 'MEDIUM', 'HIGH', 'CRITICAL', 'EMERGENCY'],
            help='Only generate recommendations for incidents with specific severity'
        )
        parser.add_argument(
            '--force',
            action='store_true',
            help='Force generation even if recommendations already exist'
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be generated without actually creating recommendations'
        )
        parser.add_argument(
            '--max-recommendations',
            type=int,
            default=5,
            help='Maximum number of recommendations per incident (default: 5)'
        )

    def handle(self, *args, **options):
        days = options['days']
        status = options['status']
        severity = options['severity']
        force = options['force']
        dry_run = options['dry_run']
        max_recommendations = options['max_recommendations']

        # Calculate date range
        end_date = timezone.now()
        start_date = end_date - timedelta(days=days)

        self.stdout.write(
            self.style.SUCCESS(f'Looking for {status} incidents from {start_date.date()} to {end_date.date()}')
        )

        # Build queryset for incidents
        queryset = Incident.objects.filter(
            status=status,
            created_at__gte=start_date,
            created_at__lte=end_date
        )

        if severity:
            queryset = queryset.filter(severity=severity)

        # Filter out incidents that already have recommendations (unless force is used)
        if not force:
            existing_recommendation_incidents = IncidentRecommendation.objects.values_list('incident_id', flat=True)
            queryset = queryset.exclude(id__in=existing_recommendation_incidents)

        incidents = list(queryset)

        if not incidents:
            self.stdout.write(
                self.style.WARNING('No incidents found matching criteria')
            )
            return

        self.stdout.write(
            self.style.SUCCESS(f'Found {len(incidents)} incidents to process')
        )

        if dry_run:
            self.stdout.write(self.style.WARNING('DRY RUN - No recommendations will be created'))
            for incident in incidents:
                self.stdout.write(f'  - {incident.title} ({incident.severity}) - {incident.created_at}')
            return

        # Initialize recommendation engine
        recommendation_engine = RecommendationEngine()

        success_count = 0
        error_count = 0
        total_recommendations = 0

        for incident in incidents:
            try:
                self.stdout.write(f'Generating recommendations for: {incident.title}')

                recommendations = recommendation_engine.generate_recommendations(
                    incident_id=str(incident.id),
                    max_recommendations=max_recommendations,
                    min_confidence=0.5
                )

                self.stdout.write(
                    self.style.SUCCESS(f'  ✓ Generated {len(recommendations)} recommendations')
                )
                success_count += 1
                total_recommendations += len(recommendations)

            except Exception as e:
                self.stdout.write(
                    self.style.ERROR(f'  ✗ Failed to generate recommendations: {str(e)}')
                )
                error_count += 1

        # Summary
        self.stdout.write('\n' + '='*50)
        self.stdout.write(
            self.style.SUCCESS('Recommendation generation completed:')
        )
        self.stdout.write(f'  Incidents processed: {success_count}')
        self.stdout.write(f'  Total recommendations generated: {total_recommendations}')
        if error_count > 0:
            self.stdout.write(
                self.style.ERROR(f'  Failed: {error_count}')
            )
        self.stdout.write('='*50)
@@ -0,0 +1,303 @@
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.utils import timezone
|
||||
from datetime import timedelta
|
||||
from django.db.models import Count, Avg
|
||||
from knowledge_learning.models import LearningPattern, IncidentRecommendation, Postmortem
|
||||
from incident_intelligence.models import Incident
|
||||
|
||||
|
||||
class Command(BaseCommand):
|
||||
help = 'Update learning patterns based on incident data and recommendations'
|
||||
|
||||
def add_arguments(self, parser):
|
||||
parser.add_argument(
|
||||
'--days',
|
||||
type=int,
|
||||
default=30,
|
||||
help='Number of days back to analyze for pattern learning (default: 30)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--min-frequency',
|
||||
type=int,
|
||||
default=3,
|
||||
help='Minimum frequency required to create a pattern (default: 3)'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--dry-run',
|
||||
action='store_true',
|
||||
help='Show what patterns would be created/updated without actually doing it'
|
||||
)
|
||||
|
||||
def handle(self, *args, **options):
|
||||
days = options['days']
|
||||
min_frequency = options['min_frequency']
|
||||
dry_run = options['dry_run']
|
||||
|
||||
# Calculate date range
|
||||
end_date = timezone.now()
|
||||
start_date = end_date - timedelta(days=days)
|
||||
|
||||
self.stdout.write(
|
||||
self.style.SUCCESS(f'Analyzing incidents from {start_date.date()} to {end_date.date()}')
|
||||
)
|
||||
|
||||
if dry_run:
|
||||
self.stdout.write(self.style.WARNING('DRY RUN - No patterns will be created/updated'))
|
||||
|
||||
# Analyze resolution patterns
|
||||
self.stdout.write('\nAnalyzing resolution patterns...')
|
||||
resolution_patterns = self.analyze_resolution_patterns(start_date, end_date, min_frequency, dry_run)
|
||||
|
||||
# Analyze prevention patterns
|
||||
self.stdout.write('\nAnalyzing prevention patterns...')
|
||||
prevention_patterns = self.analyze_prevention_patterns(start_date, end_date, min_frequency, dry_run)
|
||||
|
||||
# Analyze escalation patterns
|
||||
self.stdout.write('\nAnalyzing escalation patterns...')
|
||||
escalation_patterns = self.analyze_escalation_patterns(start_date, end_date, min_frequency, dry_run)
|
||||
|
||||
# Summary
|
||||
total_patterns = resolution_patterns + prevention_patterns + escalation_patterns
|
||||
|
||||
self.stdout.write('\n' + '='*50)
|
||||
self.stdout.write(
|
||||
self.style.SUCCESS(f'Learning pattern analysis completed:')
|
||||
)
|
||||
self.stdout.write(f' Resolution patterns: {resolution_patterns}')
|
||||
self.stdout.write(f' Prevention patterns: {prevention_patterns}')
|
||||
self.stdout.write(f' Escalation patterns: {escalation_patterns}')
|
||||
self.stdout.write(f' Total patterns: {total_patterns}')
|
||||
self.stdout.write('='*50)
|
||||
|
||||
def analyze_resolution_patterns(self, start_date, end_date, min_frequency, dry_run):
|
||||
"""Analyze patterns in incident resolution"""
|
||||
|
||||
# Get resolved incidents in the time period
|
||||
resolved_incidents = Incident.objects.filter(
|
||||
status__in=['RESOLVED', 'CLOSED'],
|
||||
resolved_at__gte=start_date,
|
||||
resolved_at__lte=end_date
|
||||
)
|
||||
|
||||
# Group by category and analyze resolution patterns
|
||||
category_patterns = {}
|
||||
|
||||
for incident in resolved_incidents:
|
||||
category = incident.category or 'Unknown'
|
||||
|
||||
if category not in category_patterns:
|
||||
category_patterns[category] = {
|
||||
'incidents': [],
|
||||
'resolution_times': [],
|
||||
'assigned_users': [],
|
||||
'severities': []
|
||||
}
|
||||
|
||||
pattern = category_patterns[category]
|
||||
pattern['incidents'].append(incident)
|
||||
|
||||
if incident.resolved_at:
|
||||
resolution_time = incident.resolved_at - incident.created_at
|
||||
pattern['resolution_times'].append(resolution_time)
|
||||
|
||||
if incident.assigned_to:
|
||||
pattern['assigned_users'].append(incident.assigned_to)
|
||||
|
||||
pattern['severities'].append(incident.severity)
|
||||
|
||||
patterns_created = 0
|
||||
|
||||
for category, data in category_patterns.items():
|
||||
if len(data['incidents']) >= min_frequency:
|
||||
# Calculate pattern statistics
|
||||
avg_resolution_time = sum(data['resolution_times'], timedelta()) / len(data['resolution_times'])
|
||||
common_users = [user for user, count in
|
||||
[(user, data['assigned_users'].count(user)) for user in set(data['assigned_users'])]
|
||||
if count >= min_frequency]
|
||||
|
||||
# Create or update pattern
|
||||
pattern_name = f"Resolution Pattern: {category}"
|
||||
|
||||
if not dry_run:
|
||||
pattern, created = LearningPattern.objects.get_or_create(
|
||||
name=pattern_name,
|
||||
pattern_type='RESOLUTION',
|
||||
defaults={
|
||||
'description': f"Resolution pattern for {category} incidents based on {len(data['incidents'])} resolved incidents",
|
||||
'frequency': len(data['incidents']),
|
||||
'success_rate': 1.0, # All resolved incidents are successful
|
||||
'confidence_score': min(len(data['incidents']) / 10.0, 1.0),
|
||||
'triggers': [category],
|
||||
'actions': [
|
||||
f"Average resolution time: {avg_resolution_time}",
|
||||
f"Common assignees: {', '.join([u.username for u in common_users[:3]])}" if common_users else "Various assignees"
|
||||
],
|
||||
'outcomes': ["Incident resolved successfully"]
|
||||
}
|
||||
)
|
||||
|
||||
if not created:
|
||||
# Update existing pattern
|
||||
pattern.frequency = len(data['incidents'])
|
||||
pattern.confidence_score = min(len(data['incidents']) / 10.0, 1.0)
|
||||
pattern.save()
|
||||
|
||||
# Add source incidents
|
||||
pattern.source_incidents.set(data['incidents'])
|
||||
|
||||
self.stdout.write(f' ✓ {pattern_name}: {len(data["incidents"])} incidents, avg resolution: {avg_resolution_time}')
|
||||
patterns_created += 1
|
||||
|
||||
return patterns_created
|
||||
|
||||
def analyze_prevention_patterns(self, start_date, end_date, min_frequency, dry_run):
|
||||
"""Analyze patterns for incident prevention"""
|
||||
|
||||
# Get postmortems with action items
|
||||
postmortems = Postmortem.objects.filter(
|
||||
status='PUBLISHED',
|
||||
created_at__gte=start_date,
|
||||
created_at__lte=end_date,
|
||||
action_items__isnull=False
|
||||
).exclude(action_items=[])
|
||||
|
||||
# Group action items by category
|
||||
prevention_actions = {}
|
||||
|
||||
for postmortem in postmortems:
|
||||
category = postmortem.incident.category or 'Unknown'
|
||||
|
||||
if category not in prevention_actions:
|
||||
prevention_actions[category] = []
|
||||
|
||||
for action_item in postmortem.action_items:
|
||||
if isinstance(action_item, dict) and action_item.get('category') == 'Prevention':
|
||||
prevention_actions[category].append({
|
||||
'action': action_item.get('title', ''),
|
||||
'description': action_item.get('description', ''),
|
||||
'postmortem': postmortem
|
||||
})
|
||||
|
||||
patterns_created = 0
|
||||
|
||||
for category, actions in prevention_actions.items():
|
||||
if len(actions) >= min_frequency:
|
||||
# Find common prevention actions
|
||||
action_counts = {}
|
||||
for action in actions:
|
||||
action_key = action['action']
|
||||
if action_key not in action_counts:
|
||||
action_counts[action_key] = 0
|
||||
action_counts[action_key] += 1
|
||||
|
||||
common_actions = [action for action, count in action_counts.items() if count >= min_frequency]
|
||||
|
||||
if common_actions:
|
||||
pattern_name = f"Prevention Pattern: {category}"
|
||||
|
||||
if not dry_run:
|
||||
pattern, created = LearningPattern.objects.get_or_create(
|
||||
name=pattern_name,
|
||||
pattern_type='PREVENTION',
|
||||
defaults={
|
||||
'description': f"Prevention pattern for {category} incidents based on {len(actions)} prevention actions",
|
||||
'frequency': len(actions),
|
||||
'success_rate': 0.8, # Assume 80% success rate for prevention
|
||||
'confidence_score': min(len(actions) / 10.0, 1.0),
|
||||
'triggers': [category],
|
||||
'actions': common_actions,
|
||||
'outcomes': ["Reduced incident frequency", "Improved system reliability"]
|
||||
}
|
||||
)
|
||||
|
||||
if not created:
|
||||
                        pattern.frequency = len(actions)
                        pattern.confidence_score = min(len(actions) / 10.0, 1.0)
                        pattern.save()

                    # Add source postmortems
                    source_postmortems = [action['postmortem'] for action in actions]
                    pattern.source_postmortems.set(source_postmortems)

                self.stdout.write(f' ✓ {pattern_name}: {len(actions)} prevention actions, {len(common_actions)} common actions')
                patterns_created += 1

        return patterns_created

    def analyze_escalation_patterns(self, start_date, end_date, min_frequency, dry_run):
        """Analyze patterns in incident escalation"""

        # Get incidents that were escalated (changed severity or priority)
        # This is a simplified analysis - in production, you'd track escalation events
        escalated_incidents = Incident.objects.filter(
            created_at__gte=start_date,
            created_at__lte=end_date,
            severity__in=['HIGH', 'CRITICAL', 'EMERGENCY']
        )

        # Group by category and analyze escalation patterns
        escalation_patterns = {}

        for incident in escalated_incidents:
            category = incident.category or 'Unknown'

            if category not in escalation_patterns:
                escalation_patterns[category] = {
                    'incidents': [],
                    'severities': [],
                    'escalation_times': []
                }

            pattern = escalation_patterns[category]
            pattern['incidents'].append(incident)
            pattern['severities'].append(incident.severity)

            # Estimate escalation time (time from creation to first update)
            if incident.updated_at > incident.created_at:
                escalation_time = incident.updated_at - incident.created_at
                pattern['escalation_times'].append(escalation_time)

        patterns_created = 0

        for category, data in escalation_patterns.items():
            if len(data['incidents']) >= min_frequency:
                # Calculate escalation statistics
                avg_escalation_time = (sum(data['escalation_times'], timedelta()) /
                                       len(data['escalation_times'])) if data['escalation_times'] else timedelta()

                severity_distribution = {}
                for severity in data['severities']:
                    severity_distribution[severity] = severity_distribution.get(severity, 0) + 1

                pattern_name = f"Escalation Pattern: {category}"

                if not dry_run:
                    pattern, created = LearningPattern.objects.get_or_create(
                        name=pattern_name,
                        pattern_type='ESCALATION',
                        defaults={
                            'description': f"Escalation pattern for {category} incidents based on {len(data['incidents'])} escalated incidents",
                            'frequency': len(data['incidents']),
                            'success_rate': 0.7,  # Assume 70% success rate for escalation
                            'confidence_score': min(len(data['incidents']) / 10.0, 1.0),
                            'triggers': [category],
                            'actions': [
                                f"Average escalation time: {avg_escalation_time}",
                                f"Severity distribution: {severity_distribution}"
                            ],
                            'outcomes': ["Appropriate incident handling", "Faster resolution"]
                        }
                    )

                    if not created:
                        pattern.frequency = len(data['incidents'])
                        pattern.confidence_score = min(len(data['incidents']) / 10.0, 1.0)
                        pattern.save()

                    # Add source incidents
                    pattern.source_incidents.set(data['incidents'])

                self.stdout.write(f' ✓ {pattern_name}: {len(data["incidents"])} incidents, avg escalation: {avg_escalation_time}')
                patterns_created += 1

        return patterns_created
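The escalation-time averaging above relies on giving `sum()` an empty `timedelta` as its start value, since the default start of `0` cannot be added to `timedelta` objects. A minimal standalone sketch of that step (the durations are made up for illustration):

```python
from datetime import timedelta

# Hypothetical escalation durations for one incident category
escalation_times = [timedelta(minutes=12), timedelta(minutes=45), timedelta(hours=1)]

avg = (sum(escalation_times, timedelta()) /
       len(escalation_times)) if escalation_times else timedelta()
print(avg)  # 0:39:00
```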
ETB-API/knowledge_learning/migrations/0001_initial.py (new file, 255 lines)
@@ -0,0 +1,255 @@
# Generated by Django 5.2.6 on 2025-09-18 17:34

import django.core.validators
import django.db.models.deletion
import uuid
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('incident_intelligence', '0004_incident_oncall_assignment_incident_sla_override_and_more'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='KnowledgeBaseArticle',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(help_text='URL-friendly identifier', unique=True)),
                ('content', models.TextField(help_text='Main article content')),
                ('summary', models.TextField(help_text='Brief summary of the article')),
                ('tags', models.JSONField(default=list, help_text='Tags for categorization and search')),
                ('article_type', models.CharField(choices=[('RUNBOOK', 'Runbook'), ('TROUBLESHOOTING', 'Troubleshooting Guide'), ('BEST_PRACTICE', 'Best Practice'), ('LESSON_LEARNED', 'Lesson Learned'), ('PROCEDURE', 'Procedure'), ('REFERENCE', 'Reference'), ('WIKI', 'Wiki Article')], max_length=20)),
                ('category', models.CharField(help_text='Primary category', max_length=100)),
                ('subcategory', models.CharField(blank=True, max_length=100, null=True)),
                ('related_services', models.JSONField(default=list, help_text='Services this article relates to')),
                ('related_components', models.JSONField(default=list, help_text='Components this article relates to')),
                ('status', models.CharField(choices=[('DRAFT', 'Draft'), ('REVIEW', 'Under Review'), ('APPROVED', 'Approved'), ('PUBLISHED', 'Published'), ('DEPRECATED', 'Deprecated')], default='DRAFT', max_length=20)),
                ('is_featured', models.BooleanField(default=False, help_text='Whether this is a featured article')),
                ('view_count', models.PositiveIntegerField(default=0, help_text='Number of times this article has been viewed')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('last_reviewed', models.DateTimeField(blank=True, null=True)),
                ('next_review_due', models.DateTimeField(blank=True, null=True)),
                ('confluence_url', models.URLField(blank=True, help_text='Link to Confluence page', null=True)),
                ('wiki_url', models.URLField(blank=True, help_text='Link to wiki page', null=True)),
                ('external_references', models.JSONField(default=list, help_text='External reference links')),
                ('search_keywords', models.JSONField(default=list, help_text='Keywords for search optimization')),
                ('difficulty_level', models.CharField(choices=[('BEGINNER', 'Beginner'), ('INTERMEDIATE', 'Intermediate'), ('ADVANCED', 'Advanced'), ('EXPERT', 'Expert')], default='INTERMEDIATE', max_length=20)),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authored_articles', to=settings.AUTH_USER_MODEL)),
                ('last_updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='updated_articles', to=settings.AUTH_USER_MODEL)),
                ('maintainer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='maintained_articles', to=settings.AUTH_USER_MODEL)),
                ('related_incidents', models.ManyToManyField(blank=True, help_text='Incidents that led to or are related to this article', related_name='knowledge_articles', to='incident_intelligence.incident')),
            ],
            options={
                'ordering': ['-updated_at', '-created_at'],
            },
        ),
        migrations.CreateModel(
            name='IncidentRecommendation',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('recommendation_type', models.CharField(choices=[('SIMILAR_INCIDENT', 'Similar Incident'), ('SOLUTION', 'Solution/Resolution'), ('KNOWLEDGE_ARTICLE', 'Knowledge Article'), ('RUNBOOK', 'Runbook'), ('EXPERT', 'Expert/Team'), ('PREVENTION', 'Prevention Strategy')], max_length=20)),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(help_text='Description of the recommendation')),
                ('similarity_score', models.FloatField(help_text='Similarity score between current and recommended incident (0.0-1.0)', validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)])),
                ('confidence_level', models.CharField(choices=[('LOW', 'Low'), ('MEDIUM', 'Medium'), ('HIGH', 'High'), ('VERY_HIGH', 'Very High')], max_length=20)),
                ('confidence_score', models.FloatField(help_text='AI confidence in this recommendation (0.0-1.0)', validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)])),
                ('suggested_actions', models.JSONField(default=list, help_text='Suggested actions to take')),
                ('expected_outcome', models.TextField(blank=True, help_text='Expected outcome of following this recommendation', null=True)),
                ('time_to_implement', models.DurationField(blank=True, help_text='Estimated time to implement', null=True)),
                ('is_applied', models.BooleanField(default=False, help_text='Whether this recommendation was applied')),
                ('applied_at', models.DateTimeField(blank=True, null=True)),
                ('effectiveness_rating', models.PositiveIntegerField(blank=True, help_text='User rating of recommendation effectiveness (1-5)', null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('reasoning', models.TextField(help_text='AI explanation for why this recommendation was made')),
                ('matching_factors', models.JSONField(default=list, help_text='Factors that led to this recommendation')),
                ('model_version', models.CharField(default='v1.0', max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('applied_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='applied_recommendations', to=settings.AUTH_USER_MODEL)),
                ('incident', models.ForeignKey(help_text='Incident for which this recommendation is made', on_delete=django.db.models.deletion.CASCADE, related_name='recommendations', to='incident_intelligence.incident')),
                ('related_incident', models.ForeignKey(blank=True, help_text='Related incident that this recommendation is based on', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='recommended_for', to='incident_intelligence.incident')),
                ('suggested_expert', models.ForeignKey(blank=True, help_text='Suggested expert who can help with this incident', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('knowledge_article', models.ForeignKey(blank=True, help_text='Related knowledge base article', null=True, on_delete=django.db.models.deletion.CASCADE, to='knowledge_learning.knowledgebasearticle')),
            ],
            options={
                'ordering': ['-confidence_score', '-similarity_score'],
            },
        ),
        migrations.CreateModel(
            name='KnowledgeBaseUsage',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('usage_type', models.CharField(choices=[('VIEW', 'Article View'), ('APPLY', 'Recommendation Applied'), ('RATE', 'Rating Given'), ('SHARE', 'Shared'), ('BOOKMARK', 'Bookmarked')], max_length=20)),
                ('context', models.JSONField(default=dict, help_text='Additional context about the usage')),
                ('session_id', models.CharField(blank=True, max_length=100, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('incident', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='knowledge_usage', to='incident_intelligence.incident')),
                ('knowledge_article', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='usage_logs', to='knowledge_learning.knowledgebasearticle')),
                ('recommendation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='usage_logs', to='knowledge_learning.incidentrecommendation')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='knowledge_usage', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
        migrations.CreateModel(
            name='Postmortem',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=200)),
                ('executive_summary', models.TextField(help_text='High-level summary for executives')),
                ('timeline', models.JSONField(default=list, help_text='Detailed timeline of events')),
                ('root_cause_analysis', models.TextField(help_text='Analysis of root causes')),
                ('impact_assessment', models.TextField(help_text='Assessment of business and technical impact')),
                ('lessons_learned', models.TextField(help_text='Key lessons learned from the incident')),
                ('action_items', models.JSONField(default=list, help_text='List of action items to prevent recurrence')),
                ('is_automated', models.BooleanField(default=True, help_text='Whether this postmortem was auto-generated')),
                ('generation_confidence', models.FloatField(blank=True, help_text='AI confidence in postmortem quality (0.0-1.0)', null=True, validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)])),
                ('auto_generated_sections', models.JSONField(default=list, help_text='List of sections that were auto-generated')),
                ('status', models.CharField(choices=[('DRAFT', 'Draft'), ('IN_REVIEW', 'In Review'), ('APPROVED', 'Approved'), ('PUBLISHED', 'Published'), ('ARCHIVED', 'Archived')], default='DRAFT', max_length=20)),
                ('severity', models.CharField(choices=[('LOW', 'Low'), ('MEDIUM', 'Medium'), ('HIGH', 'High'), ('CRITICAL', 'Critical')], max_length=20)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('published_at', models.DateTimeField(blank=True, null=True)),
                ('due_date', models.DateTimeField(blank=True, help_text='When this postmortem should be completed', null=True)),
                ('affected_services', models.JSONField(default=list, help_text='Services affected by the incident')),
                ('affected_teams', models.JSONField(default=list, help_text='Teams involved in the incident')),
                ('approver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='approved_postmortems', to=settings.AUTH_USER_MODEL)),
                ('incident', models.ForeignKey(help_text='Primary incident this postmortem is about', on_delete=django.db.models.deletion.CASCADE, related_name='postmortems', to='incident_intelligence.incident')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owned_postmortems', to=settings.AUTH_USER_MODEL)),
                ('related_incidents', models.ManyToManyField(blank=True, help_text='Other incidents related to this postmortem', related_name='related_postmortems', to='incident_intelligence.incident')),
                ('reviewers', models.ManyToManyField(blank=True, related_name='reviewed_postmortems', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
        migrations.CreateModel(
            name='LearningPattern',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('pattern_type', models.CharField(choices=[('ROOT_CAUSE', 'Root Cause Pattern'), ('RESOLUTION', 'Resolution Pattern'), ('PREVENTION', 'Prevention Pattern'), ('ESCALATION', 'Escalation Pattern'), ('COMMUNICATION', 'Communication Pattern')], max_length=20)),
                ('description', models.TextField()),
                ('frequency', models.PositiveIntegerField(default=1, help_text='How many times this pattern has been observed')),
                ('success_rate', models.FloatField(help_text='Success rate when this pattern is applied (0.0-1.0)', validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)])),
                ('confidence_score', models.FloatField(help_text="Confidence in this pattern's validity (0.0-1.0)", validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)])),
                ('triggers', models.JSONField(default=list, help_text='Conditions that trigger this pattern')),
                ('actions', models.JSONField(default=list, help_text='Actions associated with this pattern')),
                ('outcomes', models.JSONField(default=list, help_text='Expected outcomes of this pattern')),
                ('is_validated', models.BooleanField(default=False, help_text='Whether this pattern has been validated by experts')),
                ('validation_notes', models.TextField(blank=True, null=True)),
                ('times_applied', models.PositiveIntegerField(default=0, help_text='Number of times this pattern has been applied')),
                ('last_applied', models.DateTimeField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('source_incidents', models.ManyToManyField(help_text='Incidents that contributed to this pattern', related_name='learning_patterns', to='incident_intelligence.incident')),
                ('validated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='validated_patterns', to=settings.AUTH_USER_MODEL)),
                ('source_postmortems', models.ManyToManyField(help_text='Postmortems that contributed to this pattern', related_name='learning_patterns', to='knowledge_learning.postmortem')),
            ],
            options={
                'ordering': ['-confidence_score', '-frequency'],
            },
        ),
        migrations.AddField(
            model_name='knowledgebasearticle',
            name='source_postmortems',
            field=models.ManyToManyField(blank=True, help_text='Postmortems that generated this article', related_name='generated_articles', to='knowledge_learning.postmortem'),
        ),
        migrations.CreateModel(
            name='AutomatedPostmortemGeneration',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('status', models.CharField(choices=[('PENDING', 'Pending'), ('PROCESSING', 'Processing'), ('COMPLETED', 'Completed'), ('FAILED', 'Failed'), ('REVIEW_REQUIRED', 'Review Required')], default='PENDING', max_length=20)),
                ('generation_trigger', models.CharField(help_text='What triggered the generation', max_length=50)),
                ('incident_data', models.JSONField(help_text='Incident data used for generation')),
                ('timeline_data', models.JSONField(help_text='Timeline data used for generation')),
                ('log_data', models.JSONField(default=list, help_text='Log data used for generation')),
                ('generated_content', models.JSONField(blank=True, help_text='Generated postmortem content', null=True)),
                ('confidence_scores', models.JSONField(default=dict, help_text='Confidence scores for different sections')),
                ('quality_metrics', models.JSONField(default=dict, help_text='Quality metrics for generated content')),
                ('processing_time', models.FloatField(blank=True, help_text='Time taken for generation in seconds', null=True)),
                ('model_version', models.CharField(default='v1.0', max_length=50)),
                ('error_message', models.TextField(blank=True, null=True)),
                ('started_at', models.DateTimeField(auto_now_add=True)),
                ('completed_at', models.DateTimeField(blank=True, null=True)),
                ('incident', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='postmortem_generations', to='incident_intelligence.incident')),
                ('generated_postmortem', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='generation_log', to='knowledge_learning.postmortem')),
            ],
            options={
                'ordering': ['-started_at'],
            },
        ),
        migrations.AddIndex(
            model_name='incidentrecommendation',
            index=models.Index(fields=['incident', 'recommendation_type'], name='knowledge_l_inciden_382eaa_idx'),
        ),
        migrations.AddIndex(
            model_name='incidentrecommendation',
            index=models.Index(fields=['confidence_score', 'similarity_score'], name='knowledge_l_confide_478c88_idx'),
        ),
        migrations.AddIndex(
            model_name='incidentrecommendation',
            index=models.Index(fields=['is_applied'], name='knowledge_l_is_appl_5da7c0_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebaseusage',
            index=models.Index(fields=['user', 'usage_type'], name='knowledge_l_user_id_c46f08_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebaseusage',
            index=models.Index(fields=['created_at'], name='knowledge_l_created_eaa6d6_idx'),
        ),
        migrations.AddIndex(
            model_name='postmortem',
            index=models.Index(fields=['status', 'severity'], name='knowledge_l_status_f6ad36_idx'),
        ),
        migrations.AddIndex(
            model_name='postmortem',
            index=models.Index(fields=['incident', 'status'], name='knowledge_l_inciden_7ebad7_idx'),
        ),
        migrations.AddIndex(
            model_name='postmortem',
            index=models.Index(fields=['created_at'], name='knowledge_l_created_4128f5_idx'),
        ),
        migrations.AddIndex(
            model_name='learningpattern',
            index=models.Index(fields=['pattern_type', 'confidence_score'], name='knowledge_l_pattern_a8d632_idx'),
        ),
        migrations.AddIndex(
            model_name='learningpattern',
            index=models.Index(fields=['is_validated'], name='knowledge_l_is_vali_d95c24_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebasearticle',
            index=models.Index(fields=['article_type', 'status'], name='knowledge_l_article_05399a_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebasearticle',
            index=models.Index(fields=['category', 'subcategory'], name='knowledge_l_categor_622312_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebasearticle',
            index=models.Index(fields=['status', 'is_featured'], name='knowledge_l_status_6b05ce_idx'),
        ),
        migrations.AddIndex(
            model_name='knowledgebasearticle',
            index=models.Index(fields=['created_at'], name='knowledge_l_created_49b5e7_idx'),
        ),
        migrations.AddIndex(
            model_name='automatedpostmortemgeneration',
            index=models.Index(fields=['status', 'started_at'], name='knowledge_l_status_90fde2_idx'),
        ),
        migrations.AddIndex(
            model_name='automatedpostmortemgeneration',
            index=models.Index(fields=['incident', 'status'], name='knowledge_l_inciden_e3b5fd_idx'),
        ),
    ]
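Assuming the app is registered in `INSTALLED_APPS` as `knowledge_learning`, this initial migration is applied like any other; for example, from a script:

```python
from django.core.management import call_command

# Equivalent to: python manage.py migrate knowledge_learning 0001
call_command('migrate', 'knowledge_learning', '0001')
```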
ETB-API/knowledge_learning/migrations/__init__.py (new file, empty)
ETB-API/knowledge_learning/models.py (new file, 509 lines)
@@ -0,0 +1,509 @@
from django.db import models
from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils import timezone
import uuid

User = get_user_model()


class Postmortem(models.Model):
    """Automated postmortem generation and management"""

    STATUS_CHOICES = [
        ('DRAFT', 'Draft'),
        ('IN_REVIEW', 'In Review'),
        ('APPROVED', 'Approved'),
        ('PUBLISHED', 'Published'),
        ('ARCHIVED', 'Archived'),
    ]

    SEVERITY_CHOICES = [
        ('LOW', 'Low'),
        ('MEDIUM', 'Medium'),
        ('HIGH', 'High'),
        ('CRITICAL', 'Critical'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    title = models.CharField(max_length=200)
    incident = models.ForeignKey(
        'incident_intelligence.Incident',
        on_delete=models.CASCADE,
        related_name='postmortems',
        help_text="Primary incident this postmortem is about"
    )

    # Postmortem content
    executive_summary = models.TextField(help_text="High-level summary for executives")
    timeline = models.JSONField(default=list, help_text="Detailed timeline of events")
    root_cause_analysis = models.TextField(help_text="Analysis of root causes")
    impact_assessment = models.TextField(help_text="Assessment of business and technical impact")
    lessons_learned = models.TextField(help_text="Key lessons learned from the incident")
    action_items = models.JSONField(default=list, help_text="List of action items to prevent recurrence")

    # Automated generation
    is_automated = models.BooleanField(default=True, help_text="Whether this postmortem was auto-generated")
    generation_confidence = models.FloatField(
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        null=True, blank=True,
        help_text="AI confidence in postmortem quality (0.0-1.0)"
    )
    auto_generated_sections = models.JSONField(
        default=list,
        help_text="List of sections that were auto-generated"
    )

    # Status and workflow
    status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='DRAFT')
    severity = models.CharField(max_length=20, choices=SEVERITY_CHOICES)

    # Ownership and review
    owner = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name='owned_postmortems')
    reviewers = models.ManyToManyField(User, related_name='reviewed_postmortems', blank=True)
    approver = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name='approved_postmortems')

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    published_at = models.DateTimeField(null=True, blank=True)
    due_date = models.DateTimeField(null=True, blank=True, help_text="When this postmortem should be completed")

    # Related incidents and context
    related_incidents = models.ManyToManyField(
        'incident_intelligence.Incident',
        related_name='related_postmortems',
        blank=True,
        help_text="Other incidents related to this postmortem"
    )
    affected_services = models.JSONField(default=list, help_text="Services affected by the incident")
    affected_teams = models.JSONField(default=list, help_text="Teams involved in the incident")

    class Meta:
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['status', 'severity']),
            models.Index(fields=['incident', 'status']),
            models.Index(fields=['created_at']),
        ]

    def __str__(self):
        return f"Postmortem: {self.title}"

    @property
    def is_overdue(self):
        """Check if postmortem is overdue"""
        if self.due_date and self.status not in ['APPROVED', 'PUBLISHED']:
            return timezone.now() > self.due_date
        return False

    def get_completion_percentage(self):
        """Calculate completion percentage based on filled sections"""
        sections = [
            self.executive_summary,
            self.timeline,
            self.root_cause_analysis,
            self.impact_assessment,
            self.lessons_learned,
            self.action_items,
        ]
        filled_sections = sum(1 for section in sections if section)
        return (filled_sections / len(sections)) * 100
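A quick sketch of the two `Postmortem` helpers defined above (the `incident` instance is assumed to already exist, and the field values are illustrative):

```python
from datetime import timedelta
from django.utils import timezone
from knowledge_learning.models import Postmortem

pm = Postmortem.objects.create(
    title="Postmortem: Database Outage",
    incident=incident,  # assumed to already exist
    executive_summary="High severity outage on the primary database.",
    root_cause_analysis="Connection pool exhaustion under peak load.",
    impact_assessment="Roughly 500 users affected for 45 minutes.",
    lessons_learned="",  # not yet written; timeline and action_items default to []
    severity="HIGH",
    due_date=timezone.now() - timedelta(days=1),
)
print(pm.get_completion_percentage())  # 50.0 — 3 of 6 sections filled
print(pm.is_overdue)                   # True while status is still DRAFT
```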
class KnowledgeBaseArticle(models.Model):
    """Knowledge base articles for incident resolution and learning"""

    ARTICLE_TYPE_CHOICES = [
        ('RUNBOOK', 'Runbook'),
        ('TROUBLESHOOTING', 'Troubleshooting Guide'),
        ('BEST_PRACTICE', 'Best Practice'),
        ('LESSON_LEARNED', 'Lesson Learned'),
        ('PROCEDURE', 'Procedure'),
        ('REFERENCE', 'Reference'),
        ('WIKI', 'Wiki Article'),
    ]

    STATUS_CHOICES = [
        ('DRAFT', 'Draft'),
        ('REVIEW', 'Under Review'),
        ('APPROVED', 'Approved'),
        ('PUBLISHED', 'Published'),
        ('DEPRECATED', 'Deprecated'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    title = models.CharField(max_length=200)
    slug = models.SlugField(unique=True, help_text="URL-friendly identifier")

    # Content
    content = models.TextField(help_text="Main article content")
    summary = models.TextField(help_text="Brief summary of the article")
    tags = models.JSONField(default=list, help_text="Tags for categorization and search")

    # Classification
    article_type = models.CharField(max_length=20, choices=ARTICLE_TYPE_CHOICES)
    category = models.CharField(max_length=100, help_text="Primary category")
    subcategory = models.CharField(max_length=100, blank=True, null=True)

    # Related services and components
    related_services = models.JSONField(default=list, help_text="Services this article relates to")
    related_components = models.JSONField(default=list, help_text="Components this article relates to")

    # Status and workflow
    status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='DRAFT')
    is_featured = models.BooleanField(default=False, help_text="Whether this is a featured article")
    view_count = models.PositiveIntegerField(default=0, help_text="Number of times this article has been viewed")

    # Ownership and maintenance
    author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name='authored_articles')
    last_updated_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name='updated_articles')
    maintainer = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True, related_name='maintained_articles')

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    last_reviewed = models.DateTimeField(null=True, blank=True)
    next_review_due = models.DateTimeField(null=True, blank=True)

    # Related incidents and postmortems
    related_incidents = models.ManyToManyField(
        'incident_intelligence.Incident',
        related_name='knowledge_articles',
        blank=True,
        help_text="Incidents that led to or are related to this article"
    )
    source_postmortems = models.ManyToManyField(
        Postmortem,
        related_name='generated_articles',
        blank=True,
        help_text="Postmortems that generated this article"
    )

    # External integrations
    confluence_url = models.URLField(blank=True, null=True, help_text="Link to Confluence page")
    wiki_url = models.URLField(blank=True, null=True, help_text="Link to wiki page")
    external_references = models.JSONField(default=list, help_text="External reference links")

    # Search and discovery
    search_keywords = models.JSONField(default=list, help_text="Keywords for search optimization")
    difficulty_level = models.CharField(max_length=20, choices=[
        ('BEGINNER', 'Beginner'),
        ('INTERMEDIATE', 'Intermediate'),
        ('ADVANCED', 'Advanced'),
        ('EXPERT', 'Expert'),
    ], default='INTERMEDIATE')

    class Meta:
        ordering = ['-updated_at', '-created_at']
        indexes = [
            models.Index(fields=['article_type', 'status']),
            models.Index(fields=['category', 'subcategory']),
            models.Index(fields=['status', 'is_featured']),
            models.Index(fields=['created_at']),
        ]

    def __str__(self):
        return f"KB Article: {self.title}"

    def increment_view_count(self):
        """Increment the view count"""
        self.view_count += 1
        self.save(update_fields=['view_count'])

    def is_due_for_review(self):
        """Check if article is due for review"""
        if self.next_review_due:
            return timezone.now() > self.next_review_due
        return False
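One thing worth noting about `increment_view_count()` above: it reads, increments, and saves in Python, so two concurrent views can lose an update. A common alternative (a sketch, not what the model currently does) pushes the increment into the database with an `F()` expression:

```python
from django.db.models import F

def increment_view_count_atomic(article):
    """Atomically bump view_count so concurrent requests cannot clobber each other."""
    type(article).objects.filter(pk=article.pk).update(view_count=F('view_count') + 1)
    article.refresh_from_db(fields=['view_count'])
```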
class IncidentRecommendation(models.Model):
    """Recommendation engine for suggesting similar incidents and solutions"""

    RECOMMENDATION_TYPE_CHOICES = [
        ('SIMILAR_INCIDENT', 'Similar Incident'),
        ('SOLUTION', 'Solution/Resolution'),
        ('KNOWLEDGE_ARTICLE', 'Knowledge Article'),
        ('RUNBOOK', 'Runbook'),
        ('EXPERT', 'Expert/Team'),
        ('PREVENTION', 'Prevention Strategy'),
    ]

    CONFIDENCE_LEVEL_CHOICES = [
        ('LOW', 'Low'),
        ('MEDIUM', 'Medium'),
        ('HIGH', 'High'),
        ('VERY_HIGH', 'Very High'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    incident = models.ForeignKey(
        'incident_intelligence.Incident',
        on_delete=models.CASCADE,
        related_name='recommendations',
        help_text="Incident for which this recommendation is made"
    )

    # Recommendation details
    recommendation_type = models.CharField(max_length=20, choices=RECOMMENDATION_TYPE_CHOICES)
    title = models.CharField(max_length=200)
    description = models.TextField(help_text="Description of the recommendation")

    # Similarity and confidence
    similarity_score = models.FloatField(
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text="Similarity score between current and recommended incident (0.0-1.0)"
    )
    confidence_level = models.CharField(max_length=20, choices=CONFIDENCE_LEVEL_CHOICES)
    confidence_score = models.FloatField(
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text="AI confidence in this recommendation (0.0-1.0)"
    )

    # Related objects
    related_incident = models.ForeignKey(
        'incident_intelligence.Incident',
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='recommended_for',
        help_text="Related incident that this recommendation is based on"
    )
    knowledge_article = models.ForeignKey(
        KnowledgeBaseArticle,
        on_delete=models.CASCADE,
        null=True, blank=True,
        help_text="Related knowledge base article"
    )
    suggested_expert = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True, blank=True,
        help_text="Suggested expert who can help with this incident"
    )

    # Recommendation content
    suggested_actions = models.JSONField(default=list, help_text="Suggested actions to take")
    expected_outcome = models.TextField(blank=True, null=True, help_text="Expected outcome of following this recommendation")
    time_to_implement = models.DurationField(null=True, blank=True, help_text="Estimated time to implement")

    # Usage tracking
    is_applied = models.BooleanField(default=False, help_text="Whether this recommendation was applied")
    applied_at = models.DateTimeField(null=True, blank=True)
    applied_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True, blank=True,
        related_name='applied_recommendations'
    )
    effectiveness_rating = models.PositiveIntegerField(
        null=True, blank=True,
        validators=[MinValueValidator(1), MaxValueValidator(5)],
        help_text="User rating of recommendation effectiveness (1-5)"
    )

    # AI analysis
    reasoning = models.TextField(help_text="AI explanation for why this recommendation was made")
    matching_factors = models.JSONField(default=list, help_text="Factors that led to this recommendation")
    model_version = models.CharField(max_length=50, default='v1.0')

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ['-confidence_score', '-similarity_score']
        indexes = [
            models.Index(fields=['incident', 'recommendation_type']),
            models.Index(fields=['confidence_score', 'similarity_score']),
            models.Index(fields=['is_applied']),
        ]

    def __str__(self):
        return f"Recommendation: {self.title} for {self.incident.title}"


class LearningPattern(models.Model):
    """Patterns learned from incidents and postmortems"""

    PATTERN_TYPE_CHOICES = [
        ('ROOT_CAUSE', 'Root Cause Pattern'),
        ('RESOLUTION', 'Resolution Pattern'),
        ('PREVENTION', 'Prevention Pattern'),
        ('ESCALATION', 'Escalation Pattern'),
        ('COMMUNICATION', 'Communication Pattern'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=200)
    pattern_type = models.CharField(max_length=20, choices=PATTERN_TYPE_CHOICES)
    description = models.TextField()

    # Pattern characteristics
    frequency = models.PositiveIntegerField(default=1, help_text="How many times this pattern has been observed")
    success_rate = models.FloatField(
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text="Success rate when this pattern is applied (0.0-1.0)"
    )
    confidence_score = models.FloatField(
        validators=[MinValueValidator(0.0), MaxValueValidator(1.0)],
        help_text="Confidence in this pattern's validity (0.0-1.0)"
    )

    # Pattern details
    triggers = models.JSONField(default=list, help_text="Conditions that trigger this pattern")
    actions = models.JSONField(default=list, help_text="Actions associated with this pattern")
    outcomes = models.JSONField(default=list, help_text="Expected outcomes of this pattern")

    # Related data
    source_incidents = models.ManyToManyField(
        'incident_intelligence.Incident',
        related_name='learning_patterns',
        help_text="Incidents that contributed to this pattern"
    )
    source_postmortems = models.ManyToManyField(
        Postmortem,
        related_name='learning_patterns',
        help_text="Postmortems that contributed to this pattern"
    )

    # Pattern validation
    is_validated = models.BooleanField(default=False, help_text="Whether this pattern has been validated by experts")
    validated_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True, blank=True,
        related_name='validated_patterns'
    )
    validation_notes = models.TextField(blank=True, null=True)

    # Usage tracking
    times_applied = models.PositiveIntegerField(default=0, help_text="Number of times this pattern has been applied")
    last_applied = models.DateTimeField(null=True, blank=True)

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ['-confidence_score', '-frequency']
        indexes = [
            models.Index(fields=['pattern_type', 'confidence_score']),
            models.Index(fields=['is_validated']),
        ]

    def __str__(self):
        return f"Pattern: {self.name} ({self.pattern_type})"


class KnowledgeBaseUsage(models.Model):
    """Track usage of knowledge base articles and recommendations"""

    USAGE_TYPE_CHOICES = [
        ('VIEW', 'Article View'),
        ('APPLY', 'Recommendation Applied'),
        ('RATE', 'Rating Given'),
        ('SHARE', 'Shared'),
        ('BOOKMARK', 'Bookmarked'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='knowledge_usage')
    usage_type = models.CharField(max_length=20, choices=USAGE_TYPE_CHOICES)

    # Related objects
    knowledge_article = models.ForeignKey(
        KnowledgeBaseArticle,
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='usage_logs'
    )
    recommendation = models.ForeignKey(
        IncidentRecommendation,
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='usage_logs'
    )
    incident = models.ForeignKey(
        'incident_intelligence.Incident',
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='knowledge_usage'
    )

    # Usage context
    context = models.JSONField(default=dict, help_text="Additional context about the usage")
    session_id = models.CharField(max_length=100, blank=True, null=True)

    # Timestamps
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-created_at']
        indexes = [
            models.Index(fields=['user', 'usage_type']),
            models.Index(fields=['created_at']),
        ]

    def __str__(self):
        return f"{self.usage_type} by {self.user.username}"


class AutomatedPostmortemGeneration(models.Model):
    """Track automated postmortem generation attempts and results"""

    STATUS_CHOICES = [
        ('PENDING', 'Pending'),
        ('PROCESSING', 'Processing'),
        ('COMPLETED', 'Completed'),
        ('FAILED', 'Failed'),
        ('REVIEW_REQUIRED', 'Review Required'),
    ]

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    incident = models.ForeignKey(
        'incident_intelligence.Incident',
        on_delete=models.CASCADE,
        related_name='postmortem_generations'
    )

    # Generation details
    status = models.CharField(max_length=20, choices=STATUS_CHOICES, default='PENDING')
    generation_trigger = models.CharField(max_length=50, help_text="What triggered the generation")

    # Input data
    incident_data = models.JSONField(help_text="Incident data used for generation")
    timeline_data = models.JSONField(help_text="Timeline data used for generation")
    log_data = models.JSONField(default=list, help_text="Log data used for generation")

    # Generation results
    generated_content = models.JSONField(null=True, blank=True, help_text="Generated postmortem content")
    confidence_scores = models.JSONField(default=dict, help_text="Confidence scores for different sections")
    quality_metrics = models.JSONField(default=dict, help_text="Quality metrics for generated content")

    # Postmortem relationship
    generated_postmortem = models.OneToOneField(
        Postmortem,
        on_delete=models.CASCADE,
        null=True, blank=True,
        related_name='generation_log'
    )

    # Processing details
    processing_time = models.FloatField(null=True, blank=True, help_text="Time taken for generation in seconds")
    model_version = models.CharField(max_length=50, default='v1.0')
    error_message = models.TextField(blank=True, null=True)

    # Timestamps
    started_at = models.DateTimeField(auto_now_add=True)
    completed_at = models.DateTimeField(null=True, blank=True)

    class Meta:
        ordering = ['-started_at']
        indexes = [
            models.Index(fields=['status', 'started_at']),
            models.Index(fields=['incident', 'status']),
        ]

    def __str__(self):
        return f"Postmortem Generation for {self.incident.title} - {self.status}"
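Tying the tracking models together, recording that a user viewed an article while working an incident might look like this (a sketch; `user`, `article`, and `incident` are assumed to already exist):

```python
from knowledge_learning.models import KnowledgeBaseUsage

KnowledgeBaseUsage.objects.create(
    user=user,
    usage_type='VIEW',
    knowledge_article=article,
    incident=incident,  # optional link to the incident being worked
    context={'source': 'search_results'},
)
article.increment_view_count()
```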
ETB-API/knowledge_learning/serializers/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Knowledge Learning Serializers

ETB-API/knowledge_learning/serializers/knowledge.py (new file, 316 lines)
@@ -0,0 +1,316 @@
from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.utils import timezone
from ..models import (
    Postmortem, KnowledgeBaseArticle, IncidentRecommendation,
    LearningPattern, KnowledgeBaseUsage, AutomatedPostmortemGeneration
)

User = get_user_model()


class PostmortemSerializer(serializers.ModelSerializer):
    """Serializer for Postmortem model"""

    owner_username = serializers.CharField(source='owner.username', read_only=True)
    approver_username = serializers.CharField(source='approver.username', read_only=True)
    reviewer_usernames = serializers.StringRelatedField(source='reviewers', many=True, read_only=True)
    incident_title = serializers.CharField(source='incident.title', read_only=True)
    completion_percentage = serializers.ReadOnlyField(source='get_completion_percentage')  # maps to the model method
    is_overdue = serializers.ReadOnlyField()

    class Meta:
        model = Postmortem
        fields = [
            'id', 'title', 'incident', 'incident_title',
            'executive_summary', 'timeline', 'root_cause_analysis',
            'impact_assessment', 'lessons_learned', 'action_items',
            'is_automated', 'generation_confidence', 'auto_generated_sections',
            'status', 'severity', 'owner', 'owner_username',
            'reviewers', 'reviewer_usernames', 'approver', 'approver_username',
            'created_at', 'updated_at', 'published_at', 'due_date',
            'related_incidents', 'affected_services', 'affected_teams',
            'completion_percentage', 'is_overdue'
        ]
        read_only_fields = ['id', 'created_at', 'updated_at']

    def validate(self, data):
        """Validate postmortem data"""
        if data.get('due_date') and data['due_date'] < timezone.now():
            raise serializers.ValidationError("Due date cannot be in the past")
        return data


class PostmortemListSerializer(serializers.ModelSerializer):
    """Simplified serializer for postmortem lists"""

    owner_username = serializers.CharField(source='owner.username', read_only=True)
    incident_title = serializers.CharField(source='incident.title', read_only=True)
    completion_percentage = serializers.ReadOnlyField(source='get_completion_percentage')
    is_overdue = serializers.ReadOnlyField()

    class Meta:
        model = Postmortem
        fields = [
            'id', 'title', 'incident', 'incident_title',
            'status', 'severity', 'owner_username',
            'created_at', 'due_date', 'completion_percentage', 'is_overdue'
        ]


class KnowledgeBaseArticleSerializer(serializers.ModelSerializer):
    """Serializer for KnowledgeBaseArticle model"""

    author_username = serializers.CharField(source='author.username', read_only=True)
    last_updated_by_username = serializers.CharField(source='last_updated_by.username', read_only=True)
    maintainer_username = serializers.CharField(source='maintainer.username', read_only=True)
    is_due_for_review = serializers.ReadOnlyField()

    class Meta:
        model = KnowledgeBaseArticle
        fields = [
            'id', 'title', 'slug', 'content', 'summary', 'tags',
            'article_type', 'category', 'subcategory',
            'related_services', 'related_components',
            'status', 'is_featured', 'view_count',
            'author', 'author_username', 'last_updated_by', 'last_updated_by_username',
            'maintainer', 'maintainer_username',
            'created_at', 'updated_at', 'last_reviewed', 'next_review_due',
            'related_incidents', 'source_postmortems',
            'confluence_url', 'wiki_url', 'external_references',
            'search_keywords', 'difficulty_level', 'is_due_for_review'
        ]
        read_only_fields = ['id', 'created_at', 'updated_at', 'view_count']

    def create(self, validated_data):
        """Create a new knowledge base article"""
        if not validated_data.get('slug'):
            validated_data['slug'] = self.generate_slug(validated_data['title'])
        return super().create(validated_data)

    def generate_slug(self, title):
        """Generate a URL-friendly slug from title"""
        import re
        slug = re.sub(r'[^\w\s-]', '', title.lower())
        slug = re.sub(r'[-\s]+', '-', slug)
        return slug[:50]  # Limit slug length
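The two regex passes in `generate_slug()` first strip anything that is not a word character, whitespace, or hyphen, then collapse runs of whitespace and hyphens into single hyphens:

```python
>>> KnowledgeBaseArticleSerializer().generate_slug("Database Outage: Connection Pool (2024)!")
'database-outage-connection-pool-2024'
```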
class KnowledgeBaseArticleListSerializer(serializers.ModelSerializer):
    """Simplified serializer for knowledge base article lists"""

    author_username = serializers.CharField(source='author.username', read_only=True)
    is_due_for_review = serializers.ReadOnlyField()

    class Meta:
        model = KnowledgeBaseArticle
        fields = [
            'id', 'title', 'slug', 'summary', 'article_type',
            'category', 'subcategory', 'status', 'is_featured',
            'view_count', 'author_username', 'created_at', 'updated_at',
            'difficulty_level', 'is_due_for_review'
        ]


class IncidentRecommendationSerializer(serializers.ModelSerializer):
    """Serializer for IncidentRecommendation model"""

    incident_title = serializers.CharField(source='incident.title', read_only=True)
    related_incident_title = serializers.CharField(source='related_incident.title', read_only=True)
    knowledge_article_title = serializers.CharField(source='knowledge_article.title', read_only=True)
    suggested_expert_username = serializers.CharField(source='suggested_expert.username', read_only=True)
    applied_by_username = serializers.CharField(source='applied_by.username', read_only=True)

    class Meta:
        model = IncidentRecommendation
        fields = [
            'id', 'incident', 'incident_title', 'recommendation_type',
            'title', 'description', 'similarity_score', 'confidence_level',
            'confidence_score', 'related_incident', 'related_incident_title',
            'knowledge_article', 'knowledge_article_title',
            'suggested_expert', 'suggested_expert_username',
            'suggested_actions', 'expected_outcome', 'time_to_implement',
            'is_applied', 'applied_at', 'applied_by', 'applied_by_username',
            'effectiveness_rating', 'reasoning', 'matching_factors',
            'model_version', 'created_at', 'updated_at'
        ]
        read_only_fields = ['id', 'created_at', 'updated_at']

    def validate_effectiveness_rating(self, value):
        """Validate effectiveness rating"""
        if value is not None and (value < 1 or value > 5):
            raise serializers.ValidationError("Effectiveness rating must be between 1 and 5")
        return value


class IncidentRecommendationListSerializer(serializers.ModelSerializer):
    """Simplified serializer for incident recommendation lists"""

    incident_title = serializers.CharField(source='incident.title', read_only=True)
    related_incident_title = serializers.CharField(source='related_incident.title', read_only=True)
    knowledge_article_title = serializers.CharField(source='knowledge_article.title', read_only=True)

    class Meta:
        model = IncidentRecommendation
        fields = [
            'id', 'incident', 'incident_title', 'recommendation_type',
            'title', 'similarity_score', 'confidence_level', 'confidence_score',
            'related_incident_title', 'knowledge_article_title',
            'is_applied', 'created_at'
        ]


class LearningPatternSerializer(serializers.ModelSerializer):
    """Serializer for LearningPattern model"""

    validated_by_username = serializers.CharField(source='validated_by.username', read_only=True)
    source_incident_count = serializers.SerializerMethodField()
    source_postmortem_count = serializers.SerializerMethodField()

    class Meta:
        model = LearningPattern
        fields = [
            'id', 'name', 'pattern_type', 'description',
            'frequency', 'success_rate', 'confidence_score',
            'triggers', 'actions', 'outcomes',
            'source_incidents', 'source_postmortems',
            'source_incident_count', 'source_postmortem_count',
            'is_validated', 'validated_by', 'validated_by_username',
            'validation_notes', 'times_applied', 'last_applied',
            'created_at', 'updated_at'
        ]
        read_only_fields = ['id', 'created_at', 'updated_at']

    def get_source_incident_count(self, obj):
        """Get count of source incidents"""
        return obj.source_incidents.count()

    def get_source_postmortem_count(self, obj):
        """Get count of source postmortems"""
        return obj.source_postmortems.count()


class LearningPatternListSerializer(serializers.ModelSerializer):
    """Simplified serializer for learning pattern lists"""

    validated_by_username = serializers.CharField(source='validated_by.username', read_only=True)

    class Meta:
        model = LearningPattern
        fields = [
            'id', 'name', 'pattern_type', 'frequency',
            'success_rate', 'confidence_score', 'is_validated',
            'validated_by_username', 'times_applied', 'created_at'
        ]


class KnowledgeBaseUsageSerializer(serializers.ModelSerializer):
    """Serializer for KnowledgeBaseUsage model"""

    user_username = serializers.CharField(source='user.username', read_only=True)
    knowledge_article_title = serializers.CharField(source='knowledge_article.title', read_only=True)
    recommendation_title = serializers.CharField(source='recommendation.title', read_only=True)
    incident_title = serializers.CharField(source='incident.title', read_only=True)

    class Meta:
        model = KnowledgeBaseUsage
        fields = [
            'id', 'user', 'user_username', 'usage_type',
            'knowledge_article', 'knowledge_article_title',
            'recommendation', 'recommendation_title',
            'incident', 'incident_title',
            'context', 'session_id', 'created_at'
        ]
        read_only_fields = ['id', 'created_at']


class AutomatedPostmortemGenerationSerializer(serializers.ModelSerializer):
    """Serializer for AutomatedPostmortemGeneration model"""

    incident_title = serializers.CharField(source='incident.title', read_only=True)
    generated_postmortem_title = serializers.CharField(source='generated_postmortem.title', read_only=True)

    class Meta:
        model = AutomatedPostmortemGeneration
        fields = [
            'id', 'incident', 'incident_title', 'status',
            'generation_trigger', 'incident_data', 'timeline_data',
            'log_data', 'generated_content', 'confidence_scores',
            'quality_metrics', 'generated_postmortem', 'generated_postmortem_title',
            'processing_time', 'model_version', 'error_message',
            'started_at', 'completed_at'
        ]
        read_only_fields = ['id', 'started_at', 'completed_at']


class PostmortemGenerationRequestSerializer(serializers.Serializer):
    """Serializer for requesting postmortem generation"""

    incident_id = serializers.UUIDField()
    include_timeline = serializers.BooleanField(default=True)
    include_logs = serializers.BooleanField(default=True)
    generation_trigger = serializers.CharField(max_length=50, default='manual')

    def validate_incident_id(self, value):
        """Validate that incident exists and is resolved"""
        from incident_intelligence.models import Incident
        try:
            incident = Incident.objects.get(id=value)
            if not incident.is_resolved:
                raise serializers.ValidationError("Postmortem can only be generated for resolved incidents")
            return value
        except Incident.DoesNotExist:
            raise serializers.ValidationError("Incident not found")


class RecommendationRequestSerializer(serializers.Serializer):
    """Serializer for requesting incident recommendations"""

    incident_id = serializers.UUIDField()
    recommendation_types = serializers.ListField(
        child=serializers.ChoiceField(choices=IncidentRecommendation.RECOMMENDATION_TYPE_CHOICES),
        required=False
    )
    max_recommendations = serializers.IntegerField(min_value=1, max_value=20, default=5)
    min_confidence = serializers.FloatField(min_value=0.0, max_value=1.0, default=0.5)

    def validate_incident_id(self, value):
        """Validate that incident exists"""
        from incident_intelligence.models import Incident
        try:
            Incident.objects.get(id=value)
            return value
        except Incident.DoesNotExist:
            raise serializers.ValidationError("Incident not found")


class KnowledgeBaseSearchSerializer(serializers.Serializer):
    """Serializer for knowledge base search"""

    query = serializers.CharField(max_length=200)
    article_types = serializers.ListField(
        child=serializers.ChoiceField(choices=KnowledgeBaseArticle.ARTICLE_TYPE_CHOICES),
        required=False
    )
    categories = serializers.ListField(child=serializers.CharField(), required=False)
    difficulty_levels = serializers.ListField(
        child=serializers.ChoiceField(choices=KnowledgeBaseArticle._meta.get_field('difficulty_level').choices),
        required=False
    )
    limit = serializers.IntegerField(min_value=1, max_value=100, default=20)
    offset = serializers.IntegerField(min_value=0, default=0)


class KnowledgeBaseArticleRatingSerializer(serializers.Serializer):
    """Serializer for rating knowledge base articles"""

    article_id = serializers.UUIDField()
    rating = serializers.IntegerField(min_value=1, max_value=5)
    feedback = serializers.CharField(max_length=500, required=False)

    def validate_article_id(self, value):
        """Validate that article exists"""
        try:
            KnowledgeBaseArticle.objects.get(id=value)
            return value
        except KnowledgeBaseArticle.DoesNotExist:
            raise serializers.ValidationError("Knowledge base article not found")
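Driving one of the plain `Serializer` classes above from a view or a shell is straightforward; a sketch with a placeholder UUID:

```python
serializer = RecommendationRequestSerializer(data={
    'incident_id': '00000000-0000-0000-0000-000000000000',  # placeholder UUID
    'max_recommendations': 5,
    'min_confidence': 0.6,
})
if serializer.is_valid():
    params = serializer.validated_data  # validated, with defaults applied
else:
    print(serializer.errors)  # e.g. {'incident_id': ['Incident not found']}
```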
ETB-API/knowledge_learning/services/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Knowledge Learning Services

ETB-API/knowledge_learning/services/knowledge_base_search.py (new file, 422 lines)
@@ -0,0 +1,422 @@
import logging
from typing import Dict, List, Any, Optional
from django.db.models import Q, Count
from django.utils import timezone
# from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.metrics.pairwise import cosine_similarity
import re

from ..models import KnowledgeBaseArticle, KnowledgeBaseUsage

logger = logging.getLogger(__name__)


class KnowledgeBaseSearchService:
    """Service for searching and discovering knowledge base articles"""

    def __init__(self):
        self.model_version = "v1.0"
        self.min_similarity_threshold = 0.1

    def search(
        self,
        query: str,
        article_types: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        difficulty_levels: Optional[List[str]] = None,
        limit: int = 20,
        offset: int = 0
    ) -> Dict[str, Any]:
        """Search knowledge base articles with various filters"""

        try:
            # Build base queryset
            queryset = KnowledgeBaseArticle.objects.filter(status='PUBLISHED')

            # Apply filters
            if article_types:
                queryset = queryset.filter(article_type__in=article_types)

            if categories:
                queryset = queryset.filter(category__in=categories)

            if difficulty_levels:
                queryset = queryset.filter(difficulty_level__in=difficulty_levels)

            # Get all matching articles for similarity calculation
            all_articles = list(queryset)

            if not all_articles:
                return {
                    'results': [],
                    'total_count': 0,
                    'query': query,
                    'filters': {
                        'article_types': article_types,
                        'categories': categories,
                        'difficulty_levels': difficulty_levels
                    }
                }

            # Calculate similarity scores
            articles_with_scores = self._calculate_similarity_scores(query, all_articles)

            # Sort by relevance (combination of similarity and popularity)
            articles_with_scores.sort(
                key=lambda x: (x['similarity_score'] * 0.7) + (x['popularity_score'] * 0.3),
                reverse=True
            )

            # Apply pagination
            paginated_articles = articles_with_scores[offset:offset + limit]

            # Format results
            results = []
            for article_data in paginated_articles:
                article = article_data['article']
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'summary': article.summary,
                    'article_type': article.article_type,
                    'category': article.category,
                    'subcategory': article.subcategory,
                    'tags': article.tags,
                    'difficulty_level': article.difficulty_level,
                    'view_count': article.view_count,
                    'created_at': article.created_at.isoformat(),
                    'updated_at': article.updated_at.isoformat(),
                    'author': article.author.username if article.author else None,
                    'similarity_score': article_data['similarity_score'],
                    'relevance_score': article_data['relevance_score'],
                    'popularity_score': article_data['popularity_score'],
                    'matching_keywords': article_data['matching_keywords']
                })

            return {
                'results': results,
                'total_count': len(articles_with_scores),
                'query': query,
                'filters': {
                    'article_types': article_types,
                    'categories': categories,
                    'difficulty_levels': difficulty_levels
                },
                'pagination': {
                    'limit': limit,
                    'offset': offset,
                    'has_more': (offset + limit) < len(articles_with_scores)
                }
            }

        except Exception as e:
            logger.error(f"Failed to search knowledge base: {str(e)}")
            raise
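
    # Worked example of the relevance weighting above (illustrative numbers):
    # an article with similarity 0.50 and popularity 0.40 ranks at
    #   0.50 * 0.7 + 0.40 * 0.3 = 0.35 + 0.12 = 0.47,
    # so a highly similar but rarely viewed article can still outrank a
    # popular but loosely related one.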

    def find_related_articles(
        self,
        article_id: str,
        limit: int = 5
    ) -> List[Dict[str, Any]]:
        """Find articles related to a specific article"""

        try:
            article = KnowledgeBaseArticle.objects.get(id=article_id)

            # Find articles sharing a category, subcategory, tags, or article type
            related_articles = KnowledgeBaseArticle.objects.filter(
                status='PUBLISHED'
            ).exclude(id=article_id).filter(
                Q(category=article.category) |
                Q(subcategory=article.subcategory) |
                Q(tags__overlap=article.tags) |
                Q(article_type=article.article_type)
            ).distinct()

            if not related_articles.exists():
                return []

            # Calculate similarity scores
            article_text = f"{article.title} {article.summary} {' '.join(article.tags)}"
            articles_with_scores = []

            for related_article in related_articles:
                related_text = f"{related_article.title} {related_article.summary} {' '.join(related_article.tags)}"
                similarity = self._calculate_text_similarity(article_text, related_text)

                if similarity >= self.min_similarity_threshold:
                    articles_with_scores.append({
                        'article': related_article,
                        'similarity_score': similarity
                    })

            # Sort by similarity and return top matches
            articles_with_scores.sort(key=lambda x: x['similarity_score'], reverse=True)

            results = []
            for article_data in articles_with_scores[:limit]:
                article = article_data['article']
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'summary': article.summary,
                    'article_type': article.article_type,
                    'category': article.category,
                    'similarity_score': article_data['similarity_score']
                })

            return results

        except KnowledgeBaseArticle.DoesNotExist:
            raise ValueError(f"Article with ID {article_id} not found")
        except Exception as e:
            logger.error(f"Failed to find related articles: {str(e)}")
            raise

    def suggest_articles_for_incident(
        self,
        incident_title: str,
        incident_description: str,
        incident_category: str,
        limit: int = 5
    ) -> List[Dict[str, Any]]:
        """Suggest knowledge base articles for an incident"""

        try:
            # Build a search query from the incident data
            search_query = f"{incident_title} {incident_description} {incident_category}"

            # Search for relevant articles
            search_results = self.search(
                query=search_query,
                categories=[incident_category] if incident_category else None,
                limit=limit * 2  # Fetch extra results so there is room to filter
            )

            # Filter and rank results
            relevant_articles = []
            for result in search_results['results']:
                # Boost score for category matches
                category_boost = 0.2 if result['category'] == incident_category else 0.0

                # Boost score for runbooks and troubleshooting guides
                type_boost = 0.1 if result['article_type'] in ['RUNBOOK', 'TROUBLESHOOTING'] else 0.0

                final_score = result['similarity_score'] + category_boost + type_boost

                if final_score >= self.min_similarity_threshold:
                    relevant_articles.append({
                        **result,
                        'final_score': final_score
                    })

            # Sort by final score and return top matches
            relevant_articles.sort(key=lambda x: x['final_score'], reverse=True)

            return relevant_articles[:limit]

        except Exception as e:
            logger.error(f"Failed to suggest articles for incident: {str(e)}")
            raise
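
    # Worked example of the boosting above (illustrative numbers): a RUNBOOK
    # whose category matches the incident and whose text similarity is 0.35
    # scores 0.35 + 0.2 + 0.1 = 0.65, comfortably above the 0.1 threshold.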

    def get_popular_articles(
        self,
        category: Optional[str] = None,
        article_type: Optional[str] = None,
        limit: int = 10
    ) -> List[Dict[str, Any]]:
        """Get popular articles based on view count and recent activity"""

        try:
            queryset = KnowledgeBaseArticle.objects.filter(status='PUBLISHED')

            if category:
                queryset = queryset.filter(category=category)

            if article_type:
                queryset = queryset.filter(article_type=article_type)

            # Get articles ordered by popularity (view count, then recent activity)
            popular_articles = queryset.order_by('-view_count', '-updated_at')[:limit]

            results = []
            for article in popular_articles:
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'summary': article.summary,
                    'article_type': article.article_type,
                    'category': article.category,
                    'view_count': article.view_count,
                    'updated_at': article.updated_at.isoformat(),
                    'is_featured': article.is_featured
                })

            return results

        except Exception as e:
            logger.error(f"Failed to get popular articles: {str(e)}")
            raise

    def get_articles_due_for_review(self) -> List[Dict[str, Any]]:
        """Get articles that are due for review"""

        try:
            due_articles = KnowledgeBaseArticle.objects.filter(
                next_review_due__lt=timezone.now(),
                status='PUBLISHED'
            ).order_by('next_review_due')

            results = []
            for article in due_articles:
                results.append({
                    'id': str(article.id),
                    'title': article.title,
                    'slug': article.slug,
                    'category': article.category,
                    'last_reviewed': article.last_reviewed.isoformat() if article.last_reviewed else None,
                    'next_review_due': article.next_review_due.isoformat(),
                    'maintainer': article.maintainer.username if article.maintainer else None,
                    'days_overdue': (timezone.now() - article.next_review_due).days
                })

            return results

        except Exception as e:
            logger.error(f"Failed to get articles due for review: {str(e)}")
            raise

    def _calculate_similarity_scores(
        self,
        query: str,
        articles: List[KnowledgeBaseArticle]
    ) -> List[Dict[str, Any]]:
        """Calculate similarity scores for articles against a query"""

        if not articles:
            return []

        # Prepare texts for similarity calculation
        query_text = self._preprocess_text(query)
        article_texts = []

        for article in articles:
            article_text = f"{article.title} {article.summary} {' '.join(article.tags)} {' '.join(article.search_keywords)}"
            article_texts.append(self._preprocess_text(article_text))

        # Calculate similarity using simple keyword matching (fallback)
        try:
            similarities = [self._calculate_keyword_similarity(query, article_text) for article_text in article_texts]
        except Exception as e:
            logger.warning(f"Failed to calculate similarity: {str(e)}")
            similarities = [0.0] * len(article_texts)

        # Prepare results with additional scoring
        results = []
        for i, article in enumerate(articles):
            similarity_score = float(similarities[i])
            popularity_score = self._calculate_popularity_score(article)
            relevance_score = (similarity_score * 0.7) + (popularity_score * 0.3)

            matching_keywords = self._find_matching_keywords(query, article)

            results.append({
                'article': article,
                'similarity_score': similarity_score,
                'popularity_score': popularity_score,
                'relevance_score': relevance_score,
                'matching_keywords': matching_keywords
            })

        return results

    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """Calculate text similarity using simple keyword matching (fallback)"""
        try:
            if not text1.strip() or not text2.strip():
                return 0.0

            # Simple keyword-based similarity as fallback
            words1 = set(text1.lower().split())
            words2 = set(text2.lower().split())

            if not words1 or not words2:
                return 0.0

            intersection = words1.intersection(words2)
            union = words1.union(words2)

            return len(intersection) / len(union) if union else 0.0

        except Exception as e:
            logger.warning(f"Failed to calculate text similarity: {str(e)}")
            return 0.0

    def _calculate_keyword_similarity(self, query: str, article_text: str) -> float:
        """Fallback similarity calculation using keyword matching (Jaccard index)"""
        query_words = set(self._extract_keywords(query.lower()))
        article_words = set(self._extract_keywords(article_text.lower()))

        if not query_words or not article_words:
            return 0.0

        intersection = query_words.intersection(article_words)
        union = query_words.union(article_words)

        return len(intersection) / len(union) if union else 0.0
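
    # Worked example of the Jaccard calculation above: for the query
    # "database connection timeout" and an article yielding the keywords
    # {"database", "timeout", "pool", "tuning"}, the intersection is
    # {"database", "timeout"} (2 members) and the union has 5 members,
    # so the similarity is 2 / 5 = 0.4.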

    def _calculate_popularity_score(self, article: KnowledgeBaseArticle) -> float:
        """Calculate popularity score based on views and recency"""
        # Normalize view count (assuming max views is around 1000)
        view_score = min(article.view_count / 1000.0, 1.0)

        # Calculate recency score (more recent = higher score)
        days_since_update = (timezone.now() - article.updated_at).days
        recency_score = max(0, 1 - (days_since_update / 365.0))  # Decay over a year

        # Featured articles get a boost
        featured_boost = 0.1 if article.is_featured else 0.0

        return (view_score * 0.6) + (recency_score * 0.3) + featured_boost
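
    # Worked example: an article with 300 views, updated 73 days ago and
    # featured scores
    #   view_score    = min(300 / 1000.0, 1.0) = 0.30
    #   recency_score = 1 - (73 / 365.0)       = 0.80
    #   popularity    = 0.30 * 0.6 + 0.80 * 0.3 + 0.1 = 0.52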

    def _preprocess_text(self, text: str) -> str:
        """Preprocess text for similarity calculation"""
        # Convert to lowercase
        text = text.lower()

        # Remove special characters but keep spaces
        text = re.sub(r'[^\w\s]', ' ', text)

        # Remove extra whitespace
        text = re.sub(r'\s+', ' ', text).strip()

        return text

    def _extract_keywords(self, text: str) -> List[str]:
        """Extract keywords from text"""
        # Simple keyword extraction - in production, you might use more sophisticated methods
        words = text.split()

        # Filter out common stop words
        stop_words = {
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
            'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have',
            'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should'
        }

        keywords = [word for word in words if len(word) > 2 and word not in stop_words]
        return keywords

    def _find_matching_keywords(self, query: str, article: KnowledgeBaseArticle) -> List[str]:
        """Find keywords that match between the query and an article"""
        query_keywords = set(self._extract_keywords(query.lower()))

        # Check article title, summary, tags, and search keywords
        article_text = f"{article.title} {article.summary} {' '.join(article.tags)} {' '.join(article.search_keywords)}"
        article_keywords = set(self._extract_keywords(article_text.lower()))

        matching_keywords = list(query_keywords.intersection(article_keywords))
        return matching_keywords[:5]  # Return at most five matches
ETB-API/knowledge_learning/services/postmortem_generator.py (Normal file, 361 lines)
@@ -0,0 +1,361 @@
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from django.utils import timezone
from django.db import transaction

from ..models import Postmortem, AutomatedPostmortemGeneration
from incident_intelligence.models import Incident

logger = logging.getLogger(__name__)


class PostmortemGenerator:
    """Service for generating automated postmortems from incident data"""

    def __init__(self):
        self.model_version = "v1.0"

    def generate_postmortem_for_incident(
        self,
        incident_id: str,
        include_timeline: bool = True,
        include_logs: bool = True,
        trigger: str = "manual"
    ) -> Dict[str, Any]:
        """Generate a postmortem for a specific incident"""

        try:
            incident = Incident.objects.get(id=incident_id)

            # Create generation log
            generation_log = AutomatedPostmortemGeneration.objects.create(
                incident=incident,
                status='PROCESSING',
                generation_trigger=trigger,
                incident_data=self._extract_incident_data(incident),
                timeline_data=self._extract_timeline_data(incident) if include_timeline else [],
                log_data=self._extract_log_data(incident) if include_logs else []
            )

            # Generate postmortem content
            generated_content = self._generate_content(incident, generation_log)

            # Create postmortem
            postmortem = Postmortem.objects.create(
                title=f"Postmortem: {incident.title}",
                incident=incident,
                executive_summary=generated_content.get('executive_summary', ''),
                timeline=generated_content.get('timeline', []),
                root_cause_analysis=generated_content.get('root_cause_analysis', ''),
                impact_assessment=generated_content.get('impact_assessment', ''),
                lessons_learned=generated_content.get('lessons_learned', ''),
                action_items=generated_content.get('action_items', []),
                is_automated=True,
                generation_confidence=generated_content.get('confidence_score', 0.0),
                auto_generated_sections=generated_content.get('generated_sections', []),
                status='DRAFT',
                severity=incident.severity,
                affected_services=self._extract_affected_services(incident),
                affected_teams=self._extract_affected_teams(incident),
                due_date=timezone.now() + timedelta(days=7)  # Due in 7 days
            )

            # Update generation log
            generation_log.status = 'COMPLETED'
            generation_log.generated_content = generated_content
            generation_log.generated_postmortem = postmortem
            generation_log.confidence_scores = generated_content.get('confidence_scores', {})
            generation_log.quality_metrics = generated_content.get('quality_metrics', {})
            generation_log.completed_at = timezone.now()
            generation_log.save()

            return {
                'generation_id': str(generation_log.id),
                'postmortem_id': str(postmortem.id),
                'status': 'completed',
                'confidence_score': postmortem.generation_confidence
            }

        except Incident.DoesNotExist:
            raise ValueError(f"Incident with ID {incident_id} not found")
        except Exception as e:
            logger.error(f"Failed to generate postmortem for incident {incident_id}: {str(e)}")

            # Update generation log with error, if it was created
            if 'generation_log' in locals():
                generation_log.status = 'FAILED'
                generation_log.error_message = str(e)
                generation_log.completed_at = timezone.now()
                generation_log.save()

            raise
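
    # Illustrative usage (a sketch, not part of this diff; the UUID is
    # hypothetical):
    #
    #     generator = PostmortemGenerator()
    #     result = generator.generate_postmortem_for_incident(
    #         incident_id="1b2f3c4d-...",
    #         trigger="manual",
    #     )
    #     print(result['postmortem_id'], result['confidence_score'])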

    def generate_postmortem(self, incident: Incident) -> Dict[str, Any]:
        """Generate postmortem content for an incident"""
        return self._generate_content(incident, None)

    def _generate_content(self, incident: Incident, generation_log: Optional[AutomatedPostmortemGeneration] = None) -> Dict[str, Any]:
        """Generate the actual postmortem content using AI/ML models"""

        # This is a simplified implementation - in production, you would integrate with
        # actual AI/ML services like OpenAI, Azure Cognitive Services, or custom models

        content = {
            'executive_summary': self._generate_executive_summary(incident),
            'timeline': self._generate_timeline(incident),
            'root_cause_analysis': self._generate_root_cause_analysis(incident),
            'impact_assessment': self._generate_impact_assessment(incident),
            'lessons_learned': self._generate_lessons_learned(incident),
            'action_items': self._generate_action_items(incident),
            'confidence_score': self._calculate_confidence_score(incident),
            'generated_sections': ['executive_summary', 'timeline', 'root_cause_analysis', 'impact_assessment', 'lessons_learned', 'action_items'],
            'confidence_scores': {
                'executive_summary': 0.85,
                'timeline': 0.90,
                'root_cause_analysis': 0.75,
                'impact_assessment': 0.80,
                'lessons_learned': 0.70,
                'action_items': 0.75
            },
            'quality_metrics': {
                'completeness': 0.85,
                'accuracy': 0.80,
                'actionability': 0.75
            }
        }

        return content

    def _generate_executive_summary(self, incident: Incident) -> str:
        """Generate the executive summary"""
        return f"""
On {incident.created_at.strftime('%Y-%m-%d %H:%M')}, a {incident.severity.lower()} severity incident occurred affecting {incident.affected_users} users.
The incident was categorized as {incident.category} and was resolved after {self._calculate_resolution_time(incident)}.

Key Impact:
- {incident.affected_users} users affected
- Business impact: {incident.business_impact or 'Not specified'}
- Resolution time: {self._calculate_resolution_time(incident)}

This postmortem outlines the timeline, root causes, and preventive measures to avoid similar incidents in the future.
"""

    def _generate_timeline(self, incident: Incident) -> List[Dict[str, Any]]:
        """Generate the incident timeline"""
        timeline = [
            {
                'timestamp': incident.created_at.isoformat(),
                'event': 'Incident reported',
                'description': f'Incident "{incident.title}" was reported',
                'actor': incident.reporter.username if incident.reporter else 'System'
            }
        ]

        if incident.assigned_to:
            timeline.append({
                'timestamp': incident.updated_at.isoformat(),
                'event': 'Incident assigned',
                'description': f'Incident assigned to {incident.assigned_to.username}',
                'actor': 'System'
            })

        if incident.resolved_at:
            timeline.append({
                'timestamp': incident.resolved_at.isoformat(),
                'event': 'Incident resolved',
                'description': f'Incident resolved with status: {incident.status}',
                'actor': incident.assigned_to.username if incident.assigned_to else 'System'
            })

        return timeline

    def _generate_root_cause_analysis(self, incident: Incident) -> str:
        """Generate the root cause analysis"""
        return f"""
Root Cause Analysis for {incident.title}:

Primary Factors:
1. Technical Issue: {incident.category} - {incident.subcategory or 'Not specified'}
2. System Component: {incident.description[:200]}...
3. User Impact: {incident.affected_users} users affected

Contributing Factors:
- Incident severity: {incident.severity}
- Priority level: {incident.priority}
- Resolution time: {self._calculate_resolution_time(incident)}

Analysis:
Based on the incident details and timeline, the root cause appears to be related to {incident.category}.
The incident was classified with {incident.classification_confidence or 0.0:.2%} confidence,
suggesting {incident.subcategory or 'a system component failure'} as the primary cause.

Recommendations for further investigation:
1. Review system logs for the time period {incident.created_at} to {incident.resolved_at or incident.updated_at}
2. Analyze similar incidents in the past 30 days
3. Check for any recent deployments or configuration changes
"""

    def _generate_impact_assessment(self, incident: Incident) -> str:
        """Generate the impact assessment"""
        return f"""
Impact Assessment for {incident.title}:

User Impact:
- Total users affected: {incident.affected_users}
- Severity level: {incident.severity}
- Duration of impact: {self._calculate_resolution_time(incident)}

Business Impact:
{incident.business_impact or 'Business impact not specified in incident details.'}

Technical Impact:
- Affected services: {', '.join(self._extract_affected_services(incident))}
- System components: {incident.category} - {incident.subcategory or 'Not specified'}
- Estimated downtime: {incident.estimated_downtime or 'Not specified'}

Financial Impact:
- Estimated cost: To be calculated based on user impact and downtime
- SLA impact: {'SLA override applied' if incident.sla_override else 'Normal SLA applies'}

Reputation Impact:
- Customer satisfaction: Potentially affected due to service disruption
- Trust level: Impact depends on resolution time and communication
"""

    def _generate_lessons_learned(self, incident: Incident) -> str:
        """Generate the lessons learned"""
        # Compare against a timedelta here; _calculate_resolution_time returns
        # a display string, so it cannot be used in this comparison directly.
        resolution_exceeded_hour = (
            incident.resolved_at is not None
            and (incident.resolved_at - incident.created_at) > timedelta(hours=1)
        )
        return f"""
Lessons Learned from {incident.title}:

What Went Well:
1. Incident was properly categorized as {incident.category}
2. {'Automated remediation was attempted' if incident.auto_remediation_attempted else 'Manual intervention was required'}
3. {'Runbook was suggested' if incident.runbook_suggested else 'No runbook was available'}

What Could Be Improved:
1. {'Faster detection and response time needed' if resolution_exceeded_hour else 'Response time was acceptable'}
2. {'Better automation coverage needed' if not incident.auto_remediation_attempted else 'Automation worked as expected'}
3. {'More detailed incident description needed' if len(incident.description) < 100 else 'Incident description was adequate'}

Key Insights:
1. {incident.category} incidents require {'immediate' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'standard'} response procedures
2. {'Automation can help' if incident.automation_enabled else 'Manual processes need improvement'} in similar scenarios
3. {'SLA override was necessary' if incident.sla_override else 'Standard SLA procedures were sufficient'}

Process Improvements:
1. Review and update runbooks for {incident.category} incidents
2. {'Enhance monitoring' if incident.severity in ['HIGH', 'CRITICAL', 'EMERGENCY'] else 'Maintain current monitoring'} for early detection
3. {'Improve automation' if not incident.auto_remediation_attempted else 'Automation is working well'} for faster resolution
"""
    def _generate_action_items(self, incident: Incident) -> List[Dict[str, Any]]:
        """Generate action items"""
        action_items = [
            {
                'title': f'Review and update {incident.category} runbook',
                'description': f'Update the runbook for {incident.category} incidents based on lessons learned',
                'priority': 'HIGH' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'MEDIUM',
                'assignee': 'TBD',
                'due_date': (timezone.now() + timedelta(days=14)).isoformat(),
                'category': 'Process Improvement'
            },
            {
                'title': 'Enhance monitoring and alerting',
                'description': f'Improve monitoring for {incident.category} to detect similar issues earlier',
                'priority': 'MEDIUM',
                'assignee': 'TBD',
                'due_date': (timezone.now() + timedelta(days=21)).isoformat(),
                'category': 'Technical Improvement'
            }
        ]

        if not incident.auto_remediation_attempted and incident.automation_enabled:
            action_items.append({
                'title': 'Implement automated remediation',
                'description': f'Develop automated remediation for {incident.category} incidents',
                'priority': 'HIGH' if incident.severity in ['CRITICAL', 'EMERGENCY'] else 'MEDIUM',
                'assignee': 'TBD',
                'due_date': (timezone.now() + timedelta(days=30)).isoformat(),
                'category': 'Automation'
            })

        return action_items

    def _calculate_confidence_score(self, incident: Incident) -> float:
        """Calculate an overall confidence score for the generated postmortem"""
        base_confidence = 0.7

        # Adjust based on incident data quality
        if incident.classification_confidence:
            base_confidence += incident.classification_confidence * 0.2

        if len(incident.description) > 200:
            base_confidence += 0.1

        if incident.business_impact:
            base_confidence += 0.1

        return min(base_confidence, 1.0)
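
    # Worked example: for an incident classified with 0.9 confidence, a
    # description longer than 200 characters, and a recorded business impact,
    # the score is 0.7 + 0.9 * 0.2 + 0.1 + 0.1 = 1.08, capped at 1.0.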

    def _calculate_resolution_time(self, incident: Incident) -> str:
        """Calculate the resolution time as a display string"""
        if incident.resolved_at and incident.created_at:
            duration = incident.resolved_at - incident.created_at
            return str(duration)
        return "Not resolved yet"

    def _extract_incident_data(self, incident: Incident) -> Dict[str, Any]:
        """Extract relevant incident data for postmortem generation"""
        return {
            'id': str(incident.id),
            'title': incident.title,
            'description': incident.description,
            'category': incident.category,
            'subcategory': incident.subcategory,
            'severity': incident.severity,
            'priority': incident.priority,
            'status': incident.status,
            'affected_users': incident.affected_users,
            'business_impact': incident.business_impact,
            'created_at': incident.created_at.isoformat(),
            'resolved_at': incident.resolved_at.isoformat() if incident.resolved_at else None,
            'assigned_to': incident.assigned_to.username if incident.assigned_to else None,
            'reporter': incident.reporter.username if incident.reporter else None
        }

    def _extract_timeline_data(self, incident: Incident) -> List[Dict[str, Any]]:
        """Extract timeline data from the incident"""
        # In a real implementation, this would extract from incident logs, comments, etc.
        return self._generate_timeline(incident)

    def _extract_log_data(self, incident: Incident) -> List[Dict[str, Any]]:
        """Extract relevant log data for the incident"""
        # In a real implementation, this would query log systems
        return [
            {
                'timestamp': incident.created_at.isoformat(),
                'level': 'ERROR',
                'message': f'Incident {incident.title} reported',
                'source': 'incident_system'
            }
        ]

    def _extract_affected_services(self, incident: Incident) -> List[str]:
        """Extract affected services from the incident"""
        services = []
        if incident.category:
            services.append(incident.category)
        if incident.subcategory:
            services.append(incident.subcategory)
        return services

    def _extract_affected_teams(self, incident: Incident) -> List[str]:
        """Extract affected teams from the incident"""
        teams = []
        if incident.assigned_to:
            teams.append(incident.assigned_to.username)
        if incident.reporter:
            teams.append(incident.reporter.username)
        return teams
ETB-API/knowledge_learning/services/recommendation_engine.py (Normal file, 458 lines)
@@ -0,0 +1,458 @@
import logging
from typing import Dict, List, Any, Optional
from datetime import datetime, timedelta
from django.db.models import Q, Count, Avg
from django.utils import timezone
# from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.metrics.pairwise import cosine_similarity
# import numpy as np

from ..models import IncidentRecommendation, KnowledgeBaseArticle, LearningPattern
from incident_intelligence.models import Incident

logger = logging.getLogger(__name__)


class RecommendationEngine:
    """Service for generating incident recommendations based on similarity and patterns"""

    def __init__(self):
        self.model_version = "v1.0"
        self.min_similarity_threshold = 0.3
        self.max_recommendations = 10

    def generate_recommendations(
        self,
        incident_id: str,
        recommendation_types: Optional[List[str]] = None,
        max_recommendations: int = 5,
        min_confidence: float = 0.5
    ) -> List[Dict[str, Any]]:
        """Generate recommendations for a specific incident"""

        try:
            incident = Incident.objects.get(id=incident_id)
            recommendations = []

            if not recommendation_types:
                recommendation_types = [
                    'SIMILAR_INCIDENT', 'KNOWLEDGE_ARTICLE', 'SOLUTION',
                    'EXPERT', 'PREVENTION'
                ]

            # Generate different types of recommendations
            for rec_type in recommendation_types:
                if rec_type == 'SIMILAR_INCIDENT':
                    similar_incidents = self._find_similar_incidents(incident, max_recommendations)
                    recommendations.extend(similar_incidents)

                elif rec_type == 'KNOWLEDGE_ARTICLE':
                    knowledge_articles = self._find_relevant_knowledge_articles(incident, max_recommendations)
                    recommendations.extend(knowledge_articles)

                elif rec_type == 'SOLUTION':
                    solutions = self._find_solutions(incident, max_recommendations)
                    recommendations.extend(solutions)

                elif rec_type == 'EXPERT':
                    experts = self._find_experts(incident, max_recommendations)
                    recommendations.extend(experts)

                elif rec_type == 'PREVENTION':
                    prevention_strategies = self._find_prevention_strategies(incident, max_recommendations)
                    recommendations.extend(prevention_strategies)

            # Filter by minimum confidence and sort by confidence score
            recommendations = [
                rec for rec in recommendations
                if rec['confidence_score'] >= min_confidence
            ]
            recommendations.sort(key=lambda x: x['confidence_score'], reverse=True)

            # Save the top recommendations to the database
            saved_recommendations = []
            for rec_data in recommendations[:max_recommendations]:
                recommendation = self._save_recommendation(incident, rec_data)
                saved_recommendations.append({
                    'id': str(recommendation.id),
                    'title': recommendation.title,
                    'type': recommendation.recommendation_type,
                    'confidence_score': recommendation.confidence_score,
                    'similarity_score': recommendation.similarity_score
                })

            return saved_recommendations

        except Incident.DoesNotExist:
            raise ValueError(f"Incident with ID {incident_id} not found")
        except Exception as e:
            logger.error(f"Failed to generate recommendations for incident {incident_id}: {str(e)}")
            raise
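
    # Illustrative usage (a sketch, not part of this diff; the UUID is
    # hypothetical):
    #
    #     engine = RecommendationEngine()
    #     recs = engine.generate_recommendations(
    #         incident_id="1b2f3c4d-...",
    #         recommendation_types=['SIMILAR_INCIDENT', 'EXPERT'],
    #         max_recommendations=3,
    #         min_confidence=0.6,
    #     )
    #     for rec in recs:
    #         print(rec['type'], rec['title'], rec['confidence_score'])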

    def _find_similar_incidents(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Find similar incidents based on content and metadata"""

        # Get resolved incidents from the past 6 months
        six_months_ago = timezone.now() - timedelta(days=180)
        similar_incidents = Incident.objects.filter(
            status__in=['RESOLVED', 'CLOSED'],
            created_at__gte=six_months_ago
        ).exclude(id=incident.id)

        if not similar_incidents.exists():
            return []

        # Calculate similarity scores
        incident_text = f"{incident.title} {incident.description} {incident.category} {incident.subcategory or ''}"

        similarities = []
        for similar_incident in similar_incidents:
            similar_text = f"{similar_incident.title} {similar_incident.description} {similar_incident.category} {similar_incident.subcategory or ''}"

            # Calculate text similarity
            text_similarity = self._calculate_text_similarity(incident_text, similar_text)

            # Calculate metadata similarity
            metadata_similarity = self._calculate_metadata_similarity(incident, similar_incident)

            # Combined similarity score, weighted toward text content
            combined_similarity = (text_similarity * 0.7) + (metadata_similarity * 0.3)

            if combined_similarity >= self.min_similarity_threshold:
                similarities.append({
                    'incident': similar_incident,
                    'similarity_score': combined_similarity,
                    'text_similarity': text_similarity,
                    'metadata_similarity': metadata_similarity
                })

        # Sort by similarity and return top matches
        similarities.sort(key=lambda x: x['similarity_score'], reverse=True)

        recommendations = []
        for sim_data in similarities[:limit]:
            similar_incident = sim_data['incident']

            recommendations.append({
                'recommendation_type': 'SIMILAR_INCIDENT',
                'title': f'Similar Incident: {similar_incident.title}',
                'description': f'This incident is similar to {similar_incident.title} which was resolved on {similar_incident.resolved_at.strftime("%Y-%m-%d") if similar_incident.resolved_at else "unknown date"}.',
                'similarity_score': sim_data['similarity_score'],
                'confidence_score': min(sim_data['similarity_score'] * 1.2, 1.0),
                'confidence_level': self._get_confidence_level(sim_data['similarity_score']),
                'related_incident_id': str(similar_incident.id),
                'suggested_actions': [
                    f'Review how {similar_incident.title} was resolved',
                    'Check if the same resolution approach applies',
                    'Contact the incident assignee for insights'
                ],
                'expected_outcome': 'Faster resolution by applying proven solutions',
                'reasoning': f'Incidents are similar based on content ({sim_data["text_similarity"]:.2%}) and metadata ({sim_data["metadata_similarity"]:.2%})',
                'matching_factors': [
                    f'Category: {similar_incident.category}',
                    f'Severity: {similar_incident.severity}',
                    f'Text similarity: {sim_data["text_similarity"]:.2%}'
                ]
            })

        return recommendations

    def _find_relevant_knowledge_articles(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Find relevant knowledge base articles"""

        # Search for published articles, most viewed and most recently updated first
        articles = KnowledgeBaseArticle.objects.filter(
            status='PUBLISHED'
        ).order_by('-view_count', '-updated_at')

        if not articles.exists():
            return []

        # Calculate relevance scores
        incident_text = f"{incident.title} {incident.description} {incident.category}"

        recommendations = []
        for article in articles[:limit]:
            article_text = f"{article.title} {article.summary} {' '.join(article.tags)}"

            # Calculate text similarity
            similarity = self._calculate_text_similarity(incident_text, article_text)

            if similarity >= self.min_similarity_threshold:
                recommendations.append({
                    'recommendation_type': 'KNOWLEDGE_ARTICLE',
                    'title': f'Knowledge Article: {article.title}',
                    'description': f'This knowledge base article may help resolve the incident: {article.summary}',
                    'similarity_score': similarity,
                    'confidence_score': min(similarity * 1.1, 1.0),
                    'confidence_level': self._get_confidence_level(similarity),
                    'knowledge_article_id': str(article.id),
                    'suggested_actions': [
                        f'Read the article: {article.title}',
                        'Follow the procedures outlined in the article',
                        'Apply the troubleshooting steps if applicable'
                    ],
                    'expected_outcome': 'Faster resolution using documented procedures',
                    'reasoning': f'Article is relevant based on content similarity ({similarity:.2%}) and category match',
                    'matching_factors': [
                        f'Category: {article.category}',
                        f'Type: {article.article_type}',
                        f'Difficulty: {article.difficulty_level}',
                        f'Views: {article.view_count}'
                    ]
                })

        return recommendations

    def _find_solutions(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Find solutions from resolved similar incidents"""

        # Look for resolved incidents with similar characteristics
        six_months_ago = timezone.now() - timedelta(days=180)
        resolved_incidents = Incident.objects.filter(
            status__in=['RESOLVED', 'CLOSED'],
            category=incident.category,
            created_at__gte=six_months_ago
        ).exclude(id=incident.id)

        if not resolved_incidents.exists():
            return []

        recommendations = []
        for resolved_incident in resolved_incidents[:limit]:
            # Calculate how quickly it was resolved
            resolution_time = None
            if resolved_incident.resolved_at:
                resolution_time = resolved_incident.resolved_at - resolved_incident.created_at

            recommendations.append({
                'recommendation_type': 'SOLUTION',
                'title': f'Solution from {resolved_incident.title}',
                'description': f'A similar incident was resolved in {resolution_time or "an unrecorded amount of time"} using standard procedures.',
                'similarity_score': 0.8,  # High similarity assumed for the same category
                'confidence_score': 0.85,
                'confidence_level': 'HIGH',
                'related_incident_id': str(resolved_incident.id),
                'suggested_actions': [
                    'Follow the same resolution approach used for the similar incident',
                    'Check if the same root cause applies',
                    'Apply any documented solutions from the incident'
                ],
                'expected_outcome': 'Faster resolution using proven solutions',
                'reasoning': 'A similar incident in the same category was resolved successfully',
                'matching_factors': [
                    f'Category: {resolved_incident.category}',
                    f'Resolution time: {resolution_time}',
                    f'Assigned to: {resolved_incident.assigned_to.username if resolved_incident.assigned_to else "Unknown"}'
                ]
            })

        return recommendations

    def _find_experts(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Find experts who have worked on similar incidents"""

        # Find users who have resolved similar incidents in the past 6 months
        six_months_ago = timezone.now() - timedelta(days=180)
        expert_incidents = Incident.objects.filter(
            status__in=['RESOLVED', 'CLOSED'],
            category=incident.category,
            assigned_to__isnull=False,
            created_at__gte=six_months_ago
        ).exclude(id=incident.id)

        # Count incidents per expert
        expert_counts = {}
        for expert_incident in expert_incidents:
            expert = expert_incident.assigned_to
            if expert not in expert_counts:
                expert_counts[expert] = {
                    'count': 0,
                    'avg_resolution_time': timedelta(),
                    'incidents': []
                }
            expert_counts[expert]['count'] += 1
            expert_counts[expert]['incidents'].append(expert_incident)

            if expert_incident.resolved_at:
                resolution_time = expert_incident.resolved_at - expert_incident.created_at
                expert_counts[expert]['avg_resolution_time'] += resolution_time

        # Convert the accumulated totals into average resolution times
        for expert in expert_counts:
            if expert_counts[expert]['count'] > 0:
                expert_counts[expert]['avg_resolution_time'] /= expert_counts[expert]['count']

        # Sort experts by experience (incident count first, then faster average resolution)
        sorted_experts = sorted(
            expert_counts.items(),
            key=lambda x: (x[1]['count'], -x[1]['avg_resolution_time'].total_seconds()),
            reverse=True
        )

        recommendations = []
        for expert, stats in sorted_experts[:limit]:
            recommendations.append({
                'recommendation_type': 'EXPERT',
                'title': f'Expert: {expert.username}',
                'description': f'{expert.username} has resolved {stats["count"]} similar incidents with an average resolution time of {stats["avg_resolution_time"]}.',
                'similarity_score': 0.9,  # High similarity assumed for category experts
                'confidence_score': min(0.7 + (stats['count'] * 0.05), 1.0),
                'confidence_level': 'HIGH' if stats['count'] >= 3 else 'MEDIUM',
                'suggested_expert_id': str(expert.id),
                'suggested_actions': [
                    f'Contact {expert.username} for assistance',
                    'Ask about their experience with similar incidents',
                    'Request guidance on resolution approach'
                ],
                'expected_outcome': 'Expert guidance for faster resolution',
                'reasoning': f'Expert has {stats["count"]} successful resolutions in this category',
                'matching_factors': [
                    f'Category experience: {stats["count"]} incidents',
                    f'Average resolution time: {stats["avg_resolution_time"]}',
                    'Success rate: 100% (only resolved incidents are counted)'
                ]
            })

        return recommendations
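
    # Worked example of the confidence formula above: an expert with four
    # resolved incidents in the category scores min(0.7 + 4 * 0.05, 1.0) = 0.9
    # and, having three or more resolutions, is labelled HIGH confidence.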

    def _find_prevention_strategies(self, incident: Incident, limit: int = 5) -> List[Dict[str, Any]]:
        """Find prevention strategies from learning patterns"""

        # Find relevant, validated learning patterns
        patterns = LearningPattern.objects.filter(
            is_validated=True,
            pattern_type__in=['PREVENTION', 'ROOT_CAUSE'],
            source_incidents__category=incident.category
        ).distinct().order_by('-confidence_score', '-frequency')

        if not patterns.exists():
            return []

        recommendations = []
        for pattern in patterns[:limit]:
            recommendations.append({
                'recommendation_type': 'PREVENTION',
                'title': f'Prevention Strategy: {pattern.name}',
                'description': f'This prevention pattern has been validated and applied {pattern.times_applied} times with a {pattern.success_rate:.1%} success rate.',
                'similarity_score': 0.8,
                'confidence_score': pattern.confidence_score,
                'confidence_level': self._get_confidence_level(pattern.confidence_score),
                'suggested_actions': pattern.actions[:3],  # Top three actions
                'expected_outcome': f'Prevent similar incidents using a validated pattern (success rate: {pattern.success_rate:.1%})',
                'reasoning': f'Validated pattern with {pattern.frequency} observations and a {pattern.success_rate:.1%} success rate',
                'matching_factors': [
                    f'Pattern type: {pattern.pattern_type}',
                    f'Frequency: {pattern.frequency} observations',
                    f'Success rate: {pattern.success_rate:.1%}',
                    'Validation: Expert validated'
                ]
            })

        return recommendations

    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """Calculate text similarity using simple keyword matching (fallback)"""
        try:
            if not text1.strip() or not text2.strip():
                return 0.0

            # Simple keyword-based similarity as fallback
            words1 = set(text1.lower().split())
            words2 = set(text2.lower().split())

            if not words1 or not words2:
                return 0.0

            intersection = words1.intersection(words2)
            union = words1.union(words2)

            return len(intersection) / len(union) if union else 0.0

        except Exception as e:
            logger.warning(f"Failed to calculate text similarity: {str(e)}")
            return 0.0

    def _calculate_metadata_similarity(self, incident1: Incident, incident2: Incident) -> float:
        """Calculate similarity based on incident metadata"""
        similarity = 0.0

        # Category similarity
        if incident1.category == incident2.category:
            similarity += 0.4

        # Subcategory similarity
        if incident1.subcategory == incident2.subcategory and incident1.subcategory:
            similarity += 0.3

        # Severity similarity
        severity_weights = {'LOW': 1, 'MEDIUM': 2, 'HIGH': 3, 'CRITICAL': 4, 'EMERGENCY': 5}
        if incident1.severity in severity_weights and incident2.severity in severity_weights:
            severity_diff = abs(severity_weights[incident1.severity] - severity_weights[incident2.severity])
            severity_similarity = max(0, 1 - (severity_diff / 4))
            similarity += severity_similarity * 0.2

        # Priority similarity
        if incident1.priority == incident2.priority:
            similarity += 0.1

        return min(similarity, 1.0)
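
    # Worked example: two incidents sharing category and subcategory, with
    # severities HIGH and CRITICAL (weight difference 1) and different
    # priorities, score 0.4 + 0.3 + (1 - 1/4) * 0.2 + 0 = 0.85.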

    def _get_confidence_level(self, score: float) -> str:
        """Convert a confidence score to a confidence level"""
        if score >= 0.8:
            return 'VERY_HIGH'
        elif score >= 0.6:
            return 'HIGH'
        elif score >= 0.4:
            return 'MEDIUM'
        else:
            return 'LOW'

    def _save_recommendation(self, incident: Incident, rec_data: Dict[str, Any]) -> IncidentRecommendation:
        """Save a recommendation to the database"""

        # Get related objects, tolerating dangling references
        related_incident = None
        if 'related_incident_id' in rec_data:
            try:
                related_incident = Incident.objects.get(id=rec_data['related_incident_id'])
            except Incident.DoesNotExist:
                pass

        knowledge_article = None
        if 'knowledge_article_id' in rec_data:
            try:
                knowledge_article = KnowledgeBaseArticle.objects.get(id=rec_data['knowledge_article_id'])
            except KnowledgeBaseArticle.DoesNotExist:
                pass

        suggested_expert = None
        if 'suggested_expert_id' in rec_data:
            from django.contrib.auth import get_user_model
            User = get_user_model()
            try:
                suggested_expert = User.objects.get(id=rec_data['suggested_expert_id'])
            except User.DoesNotExist:
                pass

        # Create recommendation
        recommendation = IncidentRecommendation.objects.create(
            incident=incident,
            recommendation_type=rec_data['recommendation_type'],
            title=rec_data['title'],
            description=rec_data['description'],
            similarity_score=rec_data['similarity_score'],
            confidence_level=rec_data['confidence_level'],
            confidence_score=rec_data['confidence_score'],
            related_incident=related_incident,
            knowledge_article=knowledge_article,
            suggested_expert=suggested_expert,
            suggested_actions=rec_data.get('suggested_actions', []),
            expected_outcome=rec_data.get('expected_outcome', ''),
            reasoning=rec_data['reasoning'],
            matching_factors=rec_data.get('matching_factors', []),
            model_version=self.model_version
        )

        return recommendation
ETB-API/knowledge_learning/signals.py (Normal file, 370 lines)
@@ -0,0 +1,370 @@
import logging
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from django.utils import timezone
from datetime import timedelta

from .models import (
    Postmortem, KnowledgeBaseArticle, IncidentRecommendation,
    LearningPattern, KnowledgeBaseUsage, AutomatedPostmortemGeneration
)
from incident_intelligence.models import Incident

logger = logging.getLogger(__name__)


@receiver(post_save, sender=Incident)
def incident_resolved_handler(sender, instance, created, **kwargs):
    """Handle incident resolution and trigger postmortem generation"""

    # Skip newly created incidents; note this fires on every save while the
    # incident remains in a resolved state, not only on the transition
    if not created and instance.status in ['RESOLVED', 'CLOSED']:
        try:
            # Check if the incident severity warrants automatic postmortem generation
            if instance.severity in ['HIGH', 'CRITICAL', 'EMERGENCY']:
                from .services.postmortem_generator import PostmortemGenerator

                generator = PostmortemGenerator()

                # Generate postmortem asynchronously (in production, use Celery)
                try:
                    result = generator.generate_postmortem_for_incident(
                        incident_id=str(instance.id),
                        include_timeline=True,
                        include_logs=True,
                        trigger='incident_resolved'
                    )

                    logger.info(f"Generated postmortem for incident {instance.id}: {result['postmortem_id']}")

                except Exception as e:
                    logger.error(f"Failed to generate postmortem for incident {instance.id}: {str(e)}")

            # Generate recommendations for similar incidents
            from .services.recommendation_engine import RecommendationEngine

            recommendation_engine = RecommendationEngine()

            try:
                recommendations = recommendation_engine.generate_recommendations(
                    incident_id=str(instance.id),
                    max_recommendations=3,
                    min_confidence=0.6
                )

                logger.info(f"Generated {len(recommendations)} recommendations for incident {instance.id}")

            except Exception as e:
                logger.error(f"Failed to generate recommendations for incident {instance.id}: {str(e)}")

        except Exception as e:
            logger.error(f"Error in incident_resolved_handler: {str(e)}")
|
||||
|
||||
|
||||
@receiver(post_save, sender=Postmortem)
|
||||
def postmortem_saved_handler(sender, instance, created, **kwargs):
|
||||
"""Handle postmortem creation and updates"""
|
||||
|
||||
if created:
|
||||
# Set default due date if not provided
|
||||
if not instance.due_date:
|
||||
instance.due_date = timezone.now() + timedelta(days=7)
|
||||
instance.save(update_fields=['due_date'])
|
||||
|
||||
logger.info(f"Created postmortem {instance.id} for incident {instance.incident.id}")
|
||||
|
||||
# If postmortem is published, create knowledge base articles
|
||||
if instance.status == 'PUBLISHED' and not created:
|
||||
try:
|
||||
create_knowledge_articles_from_postmortem(instance)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to create knowledge articles from postmortem {instance.id}: {str(e)}")
|
||||
|
||||
|
||||
@receiver(post_save, sender=KnowledgeBaseArticle)
|
||||
def knowledge_article_saved_handler(sender, instance, created, **kwargs):
|
||||
"""Handle knowledge base article creation and updates"""
|
||||
|
||||
if created:
|
||||
# Set default review date if not provided
|
||||
if not instance.next_review_due:
|
||||
instance.next_review_due = timezone.now() + timedelta(days=90) # Review in 3 months
|
||||
instance.save(update_fields=['next_review_due'])
|
||||
|
||||
logger.info(f"Created knowledge base article {instance.id}: {instance.title}")
|
||||
|
||||
# Update search keywords if not provided
|
||||
if not instance.search_keywords:
|
||||
instance.search_keywords = extract_keywords_from_article(instance)
|
||||
instance.save(update_fields=['search_keywords'])
|
||||
|
||||
|
||||
@receiver(post_save, sender=IncidentRecommendation)
|
||||
def recommendation_applied_handler(sender, instance, created, **kwargs):
|
||||
"""Handle recommendation application and learning pattern updates"""
|
||||
|
||||
if instance.is_applied and not created:
|
||||
# Update learning patterns based on applied recommendations
|
||||
try:
|
||||
update_learning_patterns_from_recommendation(instance)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update learning patterns from recommendation {instance.id}: {str(e)}")
|
||||
|
||||
logger.info(f"Applied recommendation {instance.id} for incident {instance.incident.id}")
|
||||
|
||||
|
||||
@receiver(post_save, sender=KnowledgeBaseUsage)
|
||||
def knowledge_usage_handler(sender, instance, created, **kwargs):
|
||||
"""Handle knowledge base usage tracking"""
|
||||
|
||||
if created:
|
||||
# Update article popularity metrics
|
||||
if instance.knowledge_article and instance.usage_type == 'VIEW':
|
||||
# Increment view count (this is also handled in the model method)
|
||||
pass
|
||||
|
||||
# Track recommendation effectiveness
|
||||
if instance.recommendation and instance.usage_type == 'APPLY':
|
||||
# This could trigger learning pattern updates
|
||||
pass
|
||||
|
||||
logger.debug(f"Recorded knowledge usage: {instance.usage_type} by {instance.user.username}")
|
||||
|
||||
|
||||
@receiver(post_save, sender=LearningPattern)
|
||||
def learning_pattern_updated_handler(sender, instance, created, **kwargs):
|
||||
"""Handle learning pattern updates"""
|
||||
|
||||
if created:
|
||||
logger.info(f"Created learning pattern {instance.id}: {instance.name}")
|
||||
else:
|
||||
# If pattern is validated, update related recommendations
|
||||
if instance.is_validated:
|
||||
try:
|
||||
update_recommendations_from_validated_pattern(instance)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update recommendations from pattern {instance.id}: {str(e)}")
|
||||
|
||||
|
||||


def create_knowledge_articles_from_postmortem(postmortem: Postmortem):
    """Create knowledge base articles from postmortem content"""

    articles_created = []

    # Create runbook from action items
    if postmortem.action_items:
        runbook_content = f"""
# Runbook: {postmortem.title}

## Overview
This runbook was created from postmortem analysis of incident: {postmortem.incident.title}

## Root Cause
{postmortem.root_cause_analysis}

## Action Items
"""
        for i, action_item in enumerate(postmortem.action_items, 1):
            runbook_content += f"\n### {i}. {action_item.get('title', 'Action Item')}\n"
            runbook_content += f"{action_item.get('description', '')}\n"
            if action_item.get('priority'):
                runbook_content += f"**Priority:** {action_item['priority']}\n"

        runbook = KnowledgeBaseArticle.objects.create(
            title=f"Runbook: {postmortem.incident.category} - {postmortem.incident.title}",
            slug=f"runbook-{postmortem.incident.category.lower()}-{postmortem.id}",
            content=runbook_content,
            summary=f"Runbook created from postmortem analysis of {postmortem.incident.title}",
            article_type='RUNBOOK',
            category=postmortem.incident.category,
            subcategory=postmortem.incident.subcategory,
            tags=[postmortem.incident.category, 'runbook', 'postmortem'],
            related_services=postmortem.affected_services,
            status='DRAFT',
            author=postmortem.owner
        )
        # Assuming source_postmortems / related_incidents are many-to-many
        # (they are used with .add() elsewhere in this module), they cannot
        # be passed to objects.create(); set them once the article exists
        runbook.source_postmortems.set([postmortem])
        runbook.related_incidents.set([postmortem.incident])
        articles_created.append(runbook)

    # Create troubleshooting guide from lessons learned
    if postmortem.lessons_learned:
        troubleshooting_content = f"""
# Troubleshooting Guide: {postmortem.incident.category}

## Overview
This troubleshooting guide was created from lessons learned in incident: {postmortem.incident.title}

## Lessons Learned
{postmortem.lessons_learned}

## Common Issues and Solutions
Based on the incident analysis, here are common issues and their solutions:

### Issue: {postmortem.incident.title}
**Symptoms:** {postmortem.incident.description[:200]}...
**Solution:** {postmortem.root_cause_analysis[:300]}...
**Prevention:** {postmortem.lessons_learned[:300]}...
"""

        troubleshooting = KnowledgeBaseArticle.objects.create(
            title=f"Troubleshooting: {postmortem.incident.category} Issues",
            slug=f"troubleshooting-{postmortem.incident.category.lower()}-{postmortem.id}",
            content=troubleshooting_content,
            summary=f"Troubleshooting guide for {postmortem.incident.category} issues based on incident analysis",
            article_type='TROUBLESHOOTING',
            category=postmortem.incident.category,
            subcategory=postmortem.incident.subcategory,
            tags=[postmortem.incident.category, 'troubleshooting', 'lessons-learned'],
            related_services=postmortem.affected_services,
            status='DRAFT',
            author=postmortem.owner
        )
        troubleshooting.source_postmortems.set([postmortem])
        troubleshooting.related_incidents.set([postmortem.incident])
        articles_created.append(troubleshooting)

    logger.info(f"Created {len(articles_created)} knowledge articles from postmortem {postmortem.id}")
    return articles_created


def extract_keywords_from_article(article: KnowledgeBaseArticle) -> list:
    """Extract keywords from article content for search optimization"""

    import re
    from collections import Counter

    # Combine title, summary, and content
    text = f"{article.title} {article.summary} {article.content}"

    # Extract words (simple approach - in production, use more sophisticated NLP)
    words = re.findall(r'\b[a-zA-Z]{3,}\b', text.lower())

    # Remove common stop words
    stop_words = {
        'the', 'and', 'for', 'are', 'but', 'not', 'you', 'all', 'can', 'had', 'her', 'was', 'one', 'our',
        'out', 'day', 'get', 'has', 'him', 'his', 'how', 'its', 'may', 'new', 'now', 'old', 'see', 'two',
        'who', 'boy', 'did', 'man', 'oil', 'sit', 'try', 'use', 'way', 'will', 'with', 'this', 'that',
        'from', 'they', 'know', 'want', 'been', 'good', 'much', 'some', 'time', 'very', 'when', 'come',
        'here', 'just', 'like', 'long', 'make', 'many', 'over', 'such', 'take', 'than', 'them', 'well',
        'were', 'what', 'your', 'about', 'after', 'again', 'before', 'could', 'every', 'first', 'great',
        'little', 'might', 'never', 'other', 'place', 'right', 'should', 'still', 'think', 'under',
        'water', 'where', 'while', 'world', 'years', 'being', 'called', 'found', 'going', 'having',
        'known', 'large', 'looked', 'making', 'number', 'people', 'seemed', 'small', 'taken', 'turned',
        'wanted', 'without', 'working'
    }

    # Filter out stop words, then keep the 20 most frequent terms
    # (slicing a set-derived list would return an arbitrary 20, not the top 20)
    counts = Counter(word for word in words if word not in stop_words)
    return [word for word, _ in counts.most_common(20)]


def update_learning_patterns_from_recommendation(recommendation: IncidentRecommendation):
    """Update learning patterns based on applied recommendations"""

    # Find or create learning pattern based on recommendation type and incident category
    pattern_name = f"{recommendation.recommendation_type} Pattern for {recommendation.incident.category}"

    pattern, created = LearningPattern.objects.get_or_create(
        name=pattern_name,
        pattern_type=recommendation.recommendation_type,
        defaults={
            'description': f"Pattern learned from applied recommendations for {recommendation.incident.category} incidents",
            'frequency': 1,
            'success_rate': 1.0,  # Initial success rate
            'confidence_score': recommendation.confidence_score,
            'triggers': [recommendation.incident.category],
            'actions': recommendation.suggested_actions,
            'outcomes': [recommendation.expected_outcome] if recommendation.expected_outcome else []
        }
    )

    if not created:
        # Update existing pattern
        pattern.frequency += 1

        # Update success rate based on effectiveness rating
        if recommendation.effectiveness_rating:
            # Incremental moving average: new_avg = (old_avg * (n - 1) + new_value) / n
            current_success = (recommendation.effectiveness_rating - 1) / 4  # Convert 1-5 scale to 0-1
            pattern.success_rate = ((pattern.success_rate * (pattern.frequency - 1)) + current_success) / pattern.frequency

        # Keep the highest confidence seen so far
        pattern.confidence_score = max(pattern.confidence_score, recommendation.confidence_score)

        pattern.save()

    # Link the source incident and any postmortems; .add() is idempotent,
    # so a single pass here covers both the create and update paths
    pattern.source_incidents.add(recommendation.incident)
    if hasattr(recommendation.incident, 'postmortems'):
        for postmortem in recommendation.incident.postmortems.all():
            pattern.source_postmortems.add(postmortem)


def update_recommendations_from_validated_pattern(pattern: LearningPattern):
    """Update existing recommendations based on validated learning patterns"""

    # Find recommendations of the same type that are less confident than the
    # validated pattern, and lift them to the pattern's confidence
    related_recommendations = IncidentRecommendation.objects.filter(
        recommendation_type=pattern.pattern_type,
        confidence_score__lt=pattern.confidence_score
    )

    for recommendation in related_recommendations:
        recommendation.confidence_score = pattern.confidence_score
        recommendation.confidence_level = get_confidence_level(pattern.confidence_score)
        recommendation.save(update_fields=['confidence_score', 'confidence_level'])


def get_confidence_level(score: float) -> str:
    """Convert confidence score to confidence level"""
    if score >= 0.8:
        return 'VERY_HIGH'
    elif score >= 0.6:
        return 'HIGH'
    elif score >= 0.4:
        return 'MEDIUM'
    else:
        return 'LOW'


# Additional signal handlers for cleanup and maintenance

@receiver(post_delete, sender=KnowledgeBaseArticle)
def knowledge_article_deleted_handler(sender, instance, **kwargs):
    """Handle knowledge base article deletion"""
    logger.info(f"Deleted knowledge base article {instance.id}: {instance.title}")


@receiver(post_delete, sender=Postmortem)
def postmortem_deleted_handler(sender, instance, **kwargs):
    """Handle postmortem deletion"""
    logger.info(f"Deleted postmortem {instance.id} for incident {instance.incident.id}")


@receiver(pre_save, sender=KnowledgeBaseArticle)
def knowledge_article_pre_save_handler(sender, instance, **kwargs):
    """Handle knowledge base article before save"""

    # Auto-generate slug if not provided
    if not instance.slug and instance.title:
        import re
        slug = re.sub(r'[^\w\s-]', '', instance.title.lower())
        slug = re.sub(r'[-\s]+', '-', slug)
        instance.slug = slug[:50]  # Limit slug length

    # Update last_updated_by if content changed
    if instance.pk:  # Only for updates, not creation
        try:
            old_instance = KnowledgeBaseArticle.objects.get(pk=instance.pk)
            if (old_instance.content != instance.content or
                    old_instance.title != instance.title or
                    old_instance.summary != instance.summary):
                # Content changed, but we can't set last_updated_by here as it's not in the signal context
                pass
        except KnowledgeBaseArticle.DoesNotExist:
            pass
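
These `@receiver` handlers only fire if this signals module is imported when Django starts. A minimal wiring sketch, assuming the handlers live in `knowledge_learning/signals.py`; the `AppConfig` shown here is illustrative and not part of this commit:

```python
# knowledge_learning/apps.py (illustrative sketch)
from django.apps import AppConfig


class KnowledgeLearningConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'knowledge_learning'

    def ready(self):
        # Imported for its side effects: the @receiver decorators
        # register the handlers against post_save/post_delete/pre_save.
        from . import signals  # noqa: F401
```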

ETB-API/knowledge_learning/tests.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from django.test import TestCase

# Create your tests here.

ETB-API/knowledge_learning/urls.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views.knowledge import (
    PostmortemViewSet, KnowledgeBaseArticleViewSet,
    IncidentRecommendationViewSet, LearningPatternViewSet,
    AutomatedPostmortemGenerationViewSet
)

# Create router and register viewsets
router = DefaultRouter()
router.register(r'postmortems', PostmortemViewSet, basename='postmortem')
router.register(r'knowledge-articles', KnowledgeBaseArticleViewSet, basename='knowledge-article')
router.register(r'recommendations', IncidentRecommendationViewSet, basename='recommendation')
router.register(r'learning-patterns', LearningPatternViewSet, basename='learning-pattern')
router.register(r'postmortem-generations', AutomatedPostmortemGenerationViewSet, basename='postmortem-generation')

app_name = 'knowledge_learning'

urlpatterns = [
    path('', include(router.urls)),
]
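
For orientation, a sketch of the routes `DefaultRouter` derives from these registrations, assuming the project-level URLconf mounts this module at `/api/knowledge/` (the mount point itself is not part of this file):

```python
# Illustrative route map (not exhaustive):
#   GET  /api/knowledge/postmortems/                   -> PostmortemViewSet.list
#   POST /api/knowledge/postmortems/{pk}/approve/      -> PostmortemViewSet.approve
#   GET  /api/knowledge/knowledge-articles/{slug}/     -> KnowledgeBaseArticleViewSet.retrieve
#   POST /api/knowledge/recommendations/{pk}/apply/    -> IncidentRecommendationViewSet.apply
#   GET  /api/knowledge/learning-patterns/statistics/  -> LearningPatternViewSet.statistics
from django.urls import reverse

# Namespaced reversing works because app_name is set above
url = reverse('knowledge_learning:postmortem-list')
```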

ETB-API/knowledge_learning/views.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from django.shortcuts import render

# Create your views here.

ETB-API/knowledge_learning/views/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# Knowledge Learning Views

ETB-API/knowledge_learning/views/knowledge.py (new file, 521 lines)
@@ -0,0 +1,521 @@
from rest_framework import viewsets, status, filters
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django_filters.rest_framework import DjangoFilterBackend
from django.db.models import Q, Count, Avg, F, Sum
from django.utils import timezone
from django.shortcuts import get_object_or_404

from ..models import (
    Postmortem, KnowledgeBaseArticle, IncidentRecommendation,
    LearningPattern, KnowledgeBaseUsage, AutomatedPostmortemGeneration
)
from ..serializers.knowledge import (
    PostmortemSerializer, PostmortemListSerializer,
    KnowledgeBaseArticleSerializer, KnowledgeBaseArticleListSerializer,
    IncidentRecommendationSerializer, IncidentRecommendationListSerializer,
    LearningPatternSerializer, LearningPatternListSerializer,
    KnowledgeBaseUsageSerializer, AutomatedPostmortemGenerationSerializer,
    PostmortemGenerationRequestSerializer, RecommendationRequestSerializer,
    KnowledgeBaseSearchSerializer, KnowledgeBaseArticleRatingSerializer
)
from ..services.postmortem_generator import PostmortemGenerator
from ..services.recommendation_engine import RecommendationEngine
from ..services.knowledge_base_search import KnowledgeBaseSearchService


class PostmortemViewSet(viewsets.ModelViewSet):
    """ViewSet for managing postmortems"""

    queryset = Postmortem.objects.all()
    serializer_class = PostmortemSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['status', 'severity', 'is_automated', 'owner']
    search_fields = ['title', 'executive_summary', 'root_cause_analysis']
    ordering_fields = ['created_at', 'updated_at', 'due_date', 'severity']
    ordering = ['-created_at']

    def get_serializer_class(self):
        """Return appropriate serializer based on action"""
        if self.action == 'list':
            return PostmortemListSerializer
        return PostmortemSerializer

    def get_queryset(self):
        """Filter queryset based on user permissions"""
        queryset = super().get_queryset()

        # Filter by user access permissions
        if not self.request.user.is_staff:
            # Non-staff users can only see postmortems they own or are involved in
            queryset = queryset.filter(
                Q(owner=self.request.user) |
                Q(reviewers=self.request.user) |
                Q(approver=self.request.user)
            ).distinct()

        return queryset

    @action(detail=True, methods=['post'])
    def generate_automated(self, request, pk=None):
        """Generate automated postmortem for an incident"""
        postmortem = self.get_object()

        if postmortem.is_automated:
            return Response(
                {'error': 'Postmortem is already automated'},
                status=status.HTTP_400_BAD_REQUEST
            )

        generator = PostmortemGenerator()
        try:
            generated_content = generator.generate_postmortem(postmortem.incident)

            # Update postmortem with generated content
            postmortem.executive_summary = generated_content.get('executive_summary', '')
            postmortem.timeline = generated_content.get('timeline', [])
            postmortem.root_cause_analysis = generated_content.get('root_cause_analysis', '')
            postmortem.impact_assessment = generated_content.get('impact_assessment', '')
            postmortem.lessons_learned = generated_content.get('lessons_learned', '')
            postmortem.action_items = generated_content.get('action_items', [])
            postmortem.is_automated = True
            postmortem.generation_confidence = generated_content.get('confidence_score', 0.0)
            postmortem.auto_generated_sections = generated_content.get('generated_sections', [])
            postmortem.save()

            return Response({
                'message': 'Postmortem generated successfully',
                'confidence_score': postmortem.generation_confidence
            })

        except Exception as e:
            return Response(
                {'error': f'Failed to generate postmortem: {str(e)}'},
                status=status.HTTP_500_INTERNAL_SERVER_ERROR
            )

    @action(detail=True, methods=['post'])
    def approve(self, request, pk=None):
        """Approve a postmortem"""
        postmortem = self.get_object()

        if postmortem.status != 'IN_REVIEW':
            return Response(
                {'error': 'Postmortem must be in review status to approve'},
                status=status.HTTP_400_BAD_REQUEST
            )

        postmortem.status = 'APPROVED'
        postmortem.approver = request.user
        postmortem.save()

        return Response({'message': 'Postmortem approved successfully'})

    @action(detail=True, methods=['post'])
    def publish(self, request, pk=None):
        """Publish a postmortem"""
        postmortem = self.get_object()

        if postmortem.status != 'APPROVED':
            return Response(
                {'error': 'Postmortem must be approved before publishing'},
                status=status.HTTP_400_BAD_REQUEST
            )

        postmortem.status = 'PUBLISHED'
        postmortem.published_at = timezone.now()
        postmortem.save()

        return Response({'message': 'Postmortem published successfully'})

    @action(detail=False, methods=['get'])
    def overdue(self, request):
        """Get overdue postmortems"""
        overdue_postmortems = self.get_queryset().filter(
            due_date__lt=timezone.now(),
            status__in=['DRAFT', 'IN_REVIEW']
        )

        serializer = self.get_serializer(overdue_postmortems, many=True)
        return Response(serializer.data)

    @action(detail=False, methods=['get'])
    def statistics(self, request):
        """Get postmortem statistics"""
        queryset = self.get_queryset()

        stats = {
            'total_postmortems': queryset.count(),
            'by_status': dict(queryset.values('status').annotate(count=Count('id')).values_list('status', 'count')),
            'by_severity': dict(queryset.values('severity').annotate(count=Count('id')).values_list('severity', 'count')),
            'automated_percentage': queryset.filter(is_automated=True).count() / max(queryset.count(), 1) * 100,
            'overdue_count': queryset.filter(
                due_date__lt=timezone.now(),
                status__in=['DRAFT', 'IN_REVIEW']
            ).count(),
            # Use F() expressions: subtracting raw strings ('published_at' - 'created_at')
            # raises a TypeError instead of computing a duration
            'avg_completion_time': queryset.filter(
                published_at__isnull=False
            ).aggregate(
                avg_time=Avg(F('published_at') - F('created_at'))
            )['avg_time']
        }

        return Response(stats)
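
A quick sketch of exercising the statistics action with DRF's test client; the user setup is a throwaway and the commented response values are illustrative, not taken from real data:

```python
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient

staff = get_user_model().objects.create_user(
    username='stats-demo', password='x', is_staff=True  # throwaway staff user
)
client = APIClient()
client.force_authenticate(user=staff)

resp = client.get('/api/knowledge/postmortems/statistics/')
# resp.json() might look like:
# {
#     "total_postmortems": 25,
#     "by_status": {"DRAFT": 5, "IN_REVIEW": 2, "PUBLISHED": 18},
#     "by_severity": {"LOW": 3, "MEDIUM": 12, "HIGH": 10},
#     "automated_percentage": 72.0,
#     "overdue_count": 1,
#     "avg_completion_time": "6 04:12:00"  # a timedelta; rendering depends on DRF settings
# }
```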


class KnowledgeBaseArticleViewSet(viewsets.ModelViewSet):
    """ViewSet for managing knowledge base articles"""

    queryset = KnowledgeBaseArticle.objects.all()
    serializer_class = KnowledgeBaseArticleSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['article_type', 'category', 'subcategory', 'status', 'is_featured', 'difficulty_level']
    search_fields = ['title', 'content', 'summary', 'tags', 'search_keywords']
    ordering_fields = ['created_at', 'updated_at', 'view_count', 'title']
    ordering = ['-updated_at']
    lookup_field = 'slug'

    def get_serializer_class(self):
        """Return appropriate serializer based on action"""
        if self.action == 'list':
            return KnowledgeBaseArticleListSerializer
        return KnowledgeBaseArticleSerializer

    def retrieve(self, request, *args, **kwargs):
        """Retrieve article and increment view count"""
        instance = self.get_object()
        instance.increment_view_count()

        # Log the view
        KnowledgeBaseUsage.objects.create(
            user=request.user,
            usage_type='VIEW',
            knowledge_article=instance,
            context={'ip_address': request.META.get('REMOTE_ADDR')}
        )

        serializer = self.get_serializer(instance)
        return Response(serializer.data)

    @action(detail=True, methods=['post'])
    def rate(self, request, slug=None):
        """Rate a knowledge base article"""
        article = self.get_object()
        serializer = KnowledgeBaseArticleRatingSerializer(data=request.data)

        if serializer.is_valid():
            # Log the rating
            KnowledgeBaseUsage.objects.create(
                user=request.user,
                usage_type='RATE',
                knowledge_article=article,
                context={
                    'rating': serializer.validated_data['rating'],
                    'feedback': serializer.validated_data.get('feedback', '')
                }
            )

            return Response({'message': 'Rating recorded successfully'})

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['post'])
    def bookmark(self, request, slug=None):
        """Bookmark a knowledge base article"""
        article = self.get_object()

        # Log the bookmark
        KnowledgeBaseUsage.objects.create(
            user=request.user,
            usage_type='BOOKMARK',
            knowledge_article=article
        )

        return Response({'message': 'Article bookmarked successfully'})

    @action(detail=False, methods=['post'])
    def search(self, request):
        """Search knowledge base articles"""
        serializer = KnowledgeBaseSearchSerializer(data=request.data)

        if serializer.is_valid():
            search_service = KnowledgeBaseSearchService()
            results = search_service.search(
                query=serializer.validated_data['query'],
                article_types=serializer.validated_data.get('article_types'),
                categories=serializer.validated_data.get('categories'),
                difficulty_levels=serializer.validated_data.get('difficulty_levels'),
                limit=serializer.validated_data['limit'],
                offset=serializer.validated_data['offset']
            )

            return Response(results)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @action(detail=False, methods=['get'])
    def due_for_review(self, request):
        """Get articles due for review"""
        due_articles = self.get_queryset().filter(
            next_review_due__lt=timezone.now()
        )

        serializer = self.get_serializer(due_articles, many=True)
        return Response(serializer.data)

    @action(detail=False, methods=['get'])
    def popular(self, request):
        """Get popular articles"""
        popular_articles = self.get_queryset().filter(
            status='PUBLISHED'
        ).order_by('-view_count')[:10]

        serializer = self.get_serializer(popular_articles, many=True)
        return Response(serializer.data)

    @action(detail=False, methods=['get'])
    def statistics(self, request):
        """Get knowledge base statistics"""
        queryset = self.get_queryset()

        stats = {
            'total_articles': queryset.count(),
            'by_type': dict(queryset.values('article_type').annotate(count=Count('id')).values_list('article_type', 'count')),
            'by_status': dict(queryset.values('status').annotate(count=Count('id')).values_list('status', 'count')),
            'by_difficulty': dict(queryset.values('difficulty_level').annotate(count=Count('id')).values_list('difficulty_level', 'count')),
            # Sum, not Count: Count('view_count') would just count rows with a
            # non-null view_count instead of totalling the views
            'total_views': queryset.aggregate(total_views=Sum('view_count'))['total_views'],
            'due_for_review': queryset.filter(next_review_due__lt=timezone.now()).count(),
            'featured_articles': queryset.filter(is_featured=True).count()
        }

        return Response(stats)
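
For reference, a sketch of a request against the `search` action above; the field names mirror the serializer keys used in the view, and the endpoint prefix assumes the `/api/knowledge/` mount:

```python
import requests  # illustrative client-side call

payload = {
    'query': 'database connection timeout',
    'article_types': ['RUNBOOK', 'TROUBLESHOOTING'],  # optional filters
    'categories': ['DATABASE'],
    'limit': 10,
    'offset': 0,
}
resp = requests.post(
    'https://api.example.com/api/knowledge/knowledge-articles/search/',
    json=payload,
    headers={'Authorization': 'Token <token>'},  # placeholder credentials
)
results = resp.json()
```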


class IncidentRecommendationViewSet(viewsets.ModelViewSet):
    """ViewSet for managing incident recommendations"""

    queryset = IncidentRecommendation.objects.all()
    serializer_class = IncidentRecommendationSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['recommendation_type', 'confidence_level', 'is_applied', 'incident']
    search_fields = ['title', 'description', 'reasoning']
    ordering_fields = ['created_at', 'confidence_score', 'similarity_score']
    ordering = ['-confidence_score', '-similarity_score']

    def get_serializer_class(self):
        """Return appropriate serializer based on action"""
        if self.action == 'list':
            return IncidentRecommendationListSerializer
        return IncidentRecommendationSerializer

    def get_queryset(self):
        """Filter queryset based on user permissions"""
        queryset = super().get_queryset()

        # Filter by incident access permissions
        if not self.request.user.is_staff:
            from incident_intelligence.models import Incident
            accessible_incidents = Incident.objects.filter(
                Q(assigned_to=self.request.user) |
                Q(reporter=self.request.user)
            )
            queryset = queryset.filter(incident__in=accessible_incidents)

        return queryset

    @action(detail=True, methods=['post'])
    def apply(self, request, pk=None):
        """Apply a recommendation"""
        recommendation = self.get_object()

        if recommendation.is_applied:
            return Response(
                {'error': 'Recommendation has already been applied'},
                status=status.HTTP_400_BAD_REQUEST
            )

        recommendation.is_applied = True
        recommendation.applied_at = timezone.now()
        recommendation.applied_by = request.user
        recommendation.save()

        # Log the application
        KnowledgeBaseUsage.objects.create(
            user=request.user,
            usage_type='APPLY',
            recommendation=recommendation,
            incident=recommendation.incident
        )

        return Response({'message': 'Recommendation applied successfully'})

    @action(detail=True, methods=['post'])
    def rate_effectiveness(self, request, pk=None):
        """Rate the effectiveness of a recommendation"""
        recommendation = self.get_object()

        # Coerce to int first: JSON and form clients may send the rating as a
        # string, and comparing a str against ints raises a TypeError
        try:
            rating = int(request.data.get('rating'))
        except (TypeError, ValueError):
            rating = None

        if rating is None or not (1 <= rating <= 5):
            return Response(
                {'error': 'Rating must be between 1 and 5'},
                status=status.HTTP_400_BAD_REQUEST
            )

        recommendation.effectiveness_rating = rating
        recommendation.save()

        return Response({'message': 'Effectiveness rating recorded successfully'})

    @action(detail=False, methods=['post'])
    def generate_for_incident(self, request):
        """Generate recommendations for an incident"""
        serializer = RecommendationRequestSerializer(data=request.data)

        if serializer.is_valid():
            recommendation_engine = RecommendationEngine()
            try:
                recommendations = recommendation_engine.generate_recommendations(
                    incident_id=serializer.validated_data['incident_id'],
                    recommendation_types=serializer.validated_data.get('recommendation_types'),
                    max_recommendations=serializer.validated_data['max_recommendations'],
                    min_confidence=serializer.validated_data['min_confidence']
                )

                return Response({
                    'message': 'Recommendations generated successfully',
                    'recommendations': recommendations
                })

            except Exception as e:
                return Response(
                    {'error': f'Failed to generate recommendations: {str(e)}'},
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR
                )

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @action(detail=False, methods=['get'])
    def statistics(self, request):
        """Get recommendation statistics"""
        queryset = self.get_queryset()

        stats = {
            'total_recommendations': queryset.count(),
            'by_type': dict(queryset.values('recommendation_type').annotate(count=Count('id')).values_list('recommendation_type', 'count')),
            'by_confidence': dict(queryset.values('confidence_level').annotate(count=Count('id')).values_list('confidence_level', 'count')),
            'applied_count': queryset.filter(is_applied=True).count(),
            'avg_effectiveness': queryset.filter(
                effectiveness_rating__isnull=False
            ).aggregate(avg_rating=Avg('effectiveness_rating'))['avg_rating'],
            'high_confidence_count': queryset.filter(confidence_score__gte=0.8).count()
        }

        return Response(stats)
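
A sketch of the apply-then-rate flow across the two actions above; the ID and user are made up, and the final comment traces the arithmetic the signal-driven pattern update performs:

```python
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient

responder = get_user_model().objects.create_user(username='responder', password='x')
client = APIClient()
client.force_authenticate(user=responder)

rec_id = '3fa85f64-5717-4562-b3fc-2c963f66afa6'  # illustrative UUID

# Marks the recommendation applied; the post_save signal then
# creates or updates the matching LearningPattern
client.post(f'/api/knowledge/recommendations/{rec_id}/apply/')

# A 4/5 effectiveness rating maps into the pattern's moving-average
# success rate as (4 - 1) / 4 = 0.75
client.post(f'/api/knowledge/recommendations/{rec_id}/rate_effectiveness/', {'rating': 4})
```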


class LearningPatternViewSet(viewsets.ModelViewSet):
    """ViewSet for managing learning patterns"""

    queryset = LearningPattern.objects.all()
    serializer_class = LearningPatternSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
    filterset_fields = ['pattern_type', 'is_validated']
    search_fields = ['name', 'description', 'triggers', 'actions']
    ordering_fields = ['created_at', 'confidence_score', 'frequency', 'success_rate']
    ordering = ['-confidence_score', '-frequency']

    def get_serializer_class(self):
        """Return appropriate serializer based on action"""
        if self.action == 'list':
            return LearningPatternListSerializer
        return LearningPatternSerializer

    @action(detail=True, methods=['post'])
    def validate(self, request, pk=None):
        """Validate a learning pattern"""
        pattern = self.get_object()

        if pattern.is_validated:
            return Response(
                {'error': 'Pattern has already been validated'},
                status=status.HTTP_400_BAD_REQUEST
            )

        pattern.is_validated = True
        pattern.validated_by = request.user
        pattern.validation_notes = request.data.get('validation_notes', '')
        pattern.save()

        return Response({'message': 'Pattern validated successfully'})

    @action(detail=True, methods=['post'])
    def apply(self, request, pk=None):
        """Apply a learning pattern"""
        pattern = self.get_object()

        pattern.times_applied += 1
        pattern.last_applied = timezone.now()
        pattern.save()

        return Response({'message': 'Pattern applied successfully'})

    @action(detail=False, methods=['get'])
    def statistics(self, request):
        """Get learning pattern statistics"""
        queryset = self.get_queryset()

        stats = {
            'total_patterns': queryset.count(),
            'by_type': dict(queryset.values('pattern_type').annotate(count=Count('id')).values_list('pattern_type', 'count')),
            'validated_count': queryset.filter(is_validated=True).count(),
            'avg_confidence': queryset.aggregate(avg_confidence=Avg('confidence_score'))['avg_confidence'],
            'avg_success_rate': queryset.aggregate(avg_success=Avg('success_rate'))['avg_success'],
            # Sum, not Count: we want the total applications across all patterns
            'total_applications': queryset.aggregate(total_apps=Sum('times_applied'))['total_apps']
        }

        return Response(stats)


class AutomatedPostmortemGenerationViewSet(viewsets.ReadOnlyModelViewSet):
    """ViewSet for viewing automated postmortem generation logs"""

    queryset = AutomatedPostmortemGeneration.objects.all()
    serializer_class = AutomatedPostmortemGenerationSerializer
    permission_classes = [IsAuthenticated]
    filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
    filterset_fields = ['status', 'incident', 'generation_trigger']
    ordering_fields = ['started_at', 'completed_at', 'processing_time']
    ordering = ['-started_at']

    @action(detail=False, methods=['post'])
    def generate_postmortem(self, request):
        """Generate automated postmortem for an incident"""
        serializer = PostmortemGenerationRequestSerializer(data=request.data)

        if serializer.is_valid():
            generator = PostmortemGenerator()
            try:
                result = generator.generate_postmortem_for_incident(
                    incident_id=serializer.validated_data['incident_id'],
                    include_timeline=serializer.validated_data['include_timeline'],
                    include_logs=serializer.validated_data['include_logs'],
                    trigger=serializer.validated_data['generation_trigger']
                )

                return Response({
                    'message': 'Postmortem generation initiated',
                    'generation_id': result['generation_id']
                })

            except Exception as e:
                return Response(
                    {'error': f'Failed to generate postmortem: {str(e)}'},
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR
                )

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
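
Finally, a sketch of triggering generation through the custom action on this read-only viewset; field names follow the serializer keys used above, and the trigger value is illustrative:

```python
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient

oncall = get_user_model().objects.create_user(username='oncall', password='x')
client = APIClient()
client.force_authenticate(user=oncall)

resp = client.post('/api/knowledge/postmortem-generations/generate_postmortem/', {
    'incident_id': '3fa85f64-5717-4562-b3fc-2c963f66afa6',  # illustrative UUID
    'include_timeline': True,
    'include_logs': False,
    'generation_trigger': 'MANUAL',  # illustrative trigger value
}, format='json')
# -> {"message": "Postmortem generation initiated", "generation_id": "..."}
```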